diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 46d4eee72c..77927cfbd3 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -140,18 +140,16 @@ func genRulesSuffix(arch arch, suff string) {
 			loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno)
 			for _, rule2 := range expandOr(rule) {
-				for _, rule3 := range commute(rule2, arch) {
-					r := Rule{rule: rule3, loc: loc}
-					if rawop := strings.Split(rule3, " ")[0][1:]; isBlock(rawop, arch) {
-						blockrules[rawop] = append(blockrules[rawop], r)
-						continue
-					}
-					// Do fancier value op matching.
-					match, _, _ := r.parse()
-					op, oparch, _, _, _, _ := parseValue(match, arch, loc)
-					opname := fmt.Sprintf("Op%s%s", oparch, op.name)
-					oprules[opname] = append(oprules[opname], r)
+				r := Rule{rule: rule2, loc: loc}
+				if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) {
+					blockrules[rawop] = append(blockrules[rawop], r)
+					continue
 				}
+				// Do fancier value op matching.
+				match, _, _ := r.parse()
+				op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+				opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+				oprules[opname] = append(oprules[opname], r)
 			}
 			rule = ""
 			ruleLineno = 0
@@ -489,6 +487,8 @@ func (u *unusedInspector) node(node ast.Node) {
 		u.scope.objects[name.Name] = obj
 	case *ast.ReturnStmt:
 		u.exprs(node.Results)
+	case *ast.IncDecStmt:
+		u.node(node.X)
 
 	// expressions
 
@@ -554,6 +554,7 @@ type object struct {
 func fprint(w io.Writer, n Node) {
 	switch n := n.(type) {
 	case *File:
+		file := n
 		seenRewrite := make(map[[3]string]string)
 		fmt.Fprintf(w, "// Code generated from gen/%s%s.rules; DO NOT EDIT.\n", n.arch.name, n.suffix)
 		fmt.Fprintf(w, "// generated with: cd gen; go run *.go\n")
@@ -575,7 +576,11 @@ func fprint(w io.Writer, n Node) {
 			fprint(w, n)
 			if rr, ok := n.(*RuleRewrite); ok {
-				k := [3]string{rr.match, rr.cond, rr.result}
+				k := [3]string{
+					normalizeMatch(rr.match, file.arch),
+					normalizeWhitespace(rr.cond),
+					normalizeWhitespace(rr.result),
+				}
 				if prev, ok := seenRewrite[k]; ok {
 					log.Fatalf("duplicate rule %s, previously seen at %s\n", rr.loc, prev)
 				} else {
@@ -610,10 +615,27 @@ func fprint(w io.Writer, n Node) {
 		}
 		fmt.Fprintf(w, "// result: %s\n", n.result)
 		fmt.Fprintf(w, "for %s {\n", n.check)
+		nCommutative := 0
 		for _, n := range n.list {
+			if b, ok := n.(*CondBreak); ok {
+				b.insideCommuteLoop = nCommutative > 0
+			}
 			fprint(w, n)
+			if loop, ok := n.(StartCommuteLoop); ok {
+				if nCommutative != loop.depth {
+					panic("mismatch commute loop depth")
+				}
+				nCommutative++
+			}
 		}
-		fmt.Fprintf(w, "return true\n}\n")
+		fmt.Fprintf(w, "return true\n")
+		for i := 0; i < nCommutative; i++ {
+			fmt.Fprintln(w, "}")
+		}
+		if n.commuteDepth > 0 && n.canFail {
+			fmt.Fprint(w, "break\n")
+		}
+		fmt.Fprintf(w, "}\n")
 	case *Declare:
 		fmt.Fprintf(w, "%s := ", n.name)
 		fprint(w, n.value)
@@ -621,12 +643,20 @@ func fprint(w io.Writer, n Node) {
 	case *CondBreak:
 		fmt.Fprintf(w, "if ")
 		fprint(w, n.expr)
-		fmt.Fprintf(w, " {\nbreak\n}\n")
+		fmt.Fprintf(w, " {\n")
+		if n.insideCommuteLoop {
+			fmt.Fprintf(w, "continue")
+		} else {
+			fmt.Fprintf(w, "break")
+		}
+		fmt.Fprintf(w, "\n}\n")
 	case ast.Node:
 		printConfig.Fprint(w, emptyFset, n)
 		if _, ok := n.(ast.Stmt); ok {
 			fmt.Fprintln(w)
 		}
+	case StartCommuteLoop:
+		fmt.Fprintf(w, "for _i%d := 0; _i%d <= 1; _i%d++ {\n", n.depth, n.depth, n.depth)
 	default:
 		log.Fatalf("cannot print %T", n)
 	}
@@ -714,15 +744,20 @@ type (
 		match, cond, result string // top comments
 		check               string // top-level boolean expression
-		alloc               int    // for unique var names
-		loc                 string // file name & line number of the original rule
+		alloc        int    // for unique var names
+		loc          string // file name & line number of the original rule
+		commuteDepth int    // used to track depth of commute loops
 	}
 	Declare struct {
 		name  string
 		value ast.Expr
 	}
 	CondBreak struct {
-		expr ast.Expr
+		expr              ast.Expr
+		insideCommuteLoop bool
+	}
+	StartCommuteLoop struct {
+		depth int
 	}
 )
@@ -759,7 +794,7 @@ func declf(name, format string, a ...interface{}) *Declare {
 // breakf constructs a simple "if cond { break }" statement, using exprf for its
 // condition.
 func breakf(format string, a ...interface{}) *CondBreak {
-	return &CondBreak{exprf(format, a...)}
+	return &CondBreak{expr: exprf(format, a...)}
 }
 
 func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
@@ -779,7 +814,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 		cname := fmt.Sprintf("b.Controls[%v]", i)
 		vname := fmt.Sprintf("v_%v", i)
 		rr.add(declf(vname, cname))
-		p, op := genMatch0(rr, arch, arg, vname)
+		p, op := genMatch0(rr, arch, arg, vname, nil) // TODO: pass non-nil cnt?
 		if op != "" {
 			check := fmt.Sprintf("%s.Op == %s", cname, op)
 			if rr.check == "" {
@@ -893,10 +928,11 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 // genMatch returns the variable whose source position should be used for the
 // result (or "" if no opinion), and a boolean that reports whether the match can fail.
 func genMatch(rr *RuleRewrite, arch arch, match string) (pos, checkOp string) {
-	return genMatch0(rr, arch, match, "v")
+	cnt := varCount(rr.match, rr.cond)
+	return genMatch0(rr, arch, match, "v", cnt)
 }
 
-func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string) {
+func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int) (pos, checkOp string) {
 	if match[0] != '(' || match[len(match)-1] != ')' {
 		log.Fatalf("non-compound expr in genMatch0: %q", match)
 	}
@@ -927,10 +963,20 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
 		}
 	}
 
+	commutative := op.commutative
+	if commutative {
+		if args[0] == args[1] {
+			commutative = false
+		}
+		if cnt[args[0]] == 1 && cnt[args[1]] == 1 {
+			commutative = false
+		}
+	}
+
 	// Access last argument first to minimize bounds checks.
 	if n := len(args); n > 1 {
 		a := args[n-1]
-		if a != "_" && !rr.declared(a) && token.IsIdentifier(a) {
+		if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
 			rr.add(declf(a, "%s.Args[%d]", v, n-1))
 			// delete the last argument so it is not reprocessed
@@ -939,7 +985,22 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
 			rr.add(stmtf("_ = %s.Args[%d]", v, n-1))
 		}
 	}
+	var commuteDepth int
+	if commutative {
+		commuteDepth = rr.commuteDepth
+		rr.add(StartCommuteLoop{commuteDepth})
+		rr.commuteDepth++
+	}
 	for i, arg := range args {
+		argidx := strconv.Itoa(i)
+		if commutative {
+			switch i {
+			case 0:
+				argidx = fmt.Sprintf("_i%d", commuteDepth)
+			case 1:
+				argidx = fmt.Sprintf("1^_i%d", commuteDepth)
+			}
+		}
 		if arg == "_" {
 			continue
 		}
@@ -950,9 +1011,9 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
 			// the old definition and the new definition match.
 			// For example, (add x x). Equality is just pointer equality
 			// on Values (so cse is important to do before lowering).
- rr.add(breakf("%s != %s.Args[%d]", arg, v, i)) + rr.add(breakf("%s != %s.Args[%s]", arg, v, argidx)) } else { - rr.add(declf(arg, "%s.Args[%d]", v, i)) + rr.add(declf(arg, "%s.Args[%s]", v, argidx)) } continue } @@ -969,10 +1030,10 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string log.Fatalf("don't name args 'b', it is ambiguous with blocks") } - rr.add(declf(argname, "%s.Args[%d]", v, i)) + rr.add(declf(argname, "%s.Args[%s]", v, argidx)) bexpr := exprf("%s.Op != addLater", argname) rr.add(&CondBreak{expr: bexpr}) - argPos, argCheckOp := genMatch0(rr, arch, arg, argname) + argPos, argCheckOp := genMatch0(rr, arch, arg, argname, cnt) bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp if argPos != "" { @@ -1334,99 +1395,6 @@ func expandOr(r string) []string { return res } -// commute returns all equivalent rules to r after applying all possible -// argument swaps to the commutable ops in r. -// Potentially exponential, be careful. -func commute(r string, arch arch) []string { - match, cond, result := Rule{rule: r}.parse() - a := commute1(match, varCount(match, cond), arch) - for i, m := range a { - if cond != "" { - m += " && " + cond - } - m += " -> " + result - a[i] = m - } - if len(a) == 1 && normalizeWhitespace(r) != normalizeWhitespace(a[0]) { - fmt.Println(normalizeWhitespace(r)) - fmt.Println(normalizeWhitespace(a[0])) - log.Fatalf("commute() is not the identity for noncommuting rule") - } - if false && len(a) > 1 { - fmt.Println(r) - for _, x := range a { - fmt.Println(" " + x) - } - } - return a -} - -func commute1(m string, cnt map[string]int, arch arch) []string { - if m[0] == '<' || m[0] == '[' || m[0] == '{' || token.IsIdentifier(m) { - return []string{m} - } - // Split up input. - var prefix string - if i := strings.Index(m, ":"); i >= 0 && token.IsIdentifier(m[:i]) { - prefix = m[:i+1] - m = m[i+1:] - } - if m[0] != '(' || m[len(m)-1] != ')' { - log.Fatalf("non-compound expr in commute1: %q", m) - } - s := split(m[1 : len(m)-1]) - op := s[0] - - commutative := opIsCommutative(op, arch) - var idx0, idx1 int - if commutative { - // Find indexes of two args we can swap. - for i, arg := range s { - if i == 0 || arg[0] == '<' || arg[0] == '[' || arg[0] == '{' { - continue - } - if idx0 == 0 { - idx0 = i - continue - } - if idx1 == 0 { - idx1 = i - break - } - } - if idx1 == 0 { - log.Fatalf("couldn't find first two args of commutative op %q", s[0]) - } - if cnt[s[idx0]] == 1 && cnt[s[idx1]] == 1 || s[idx0] == s[idx1] { - // When we have (Add x y) with no other uses of x and y in the matching rule, - // then we can skip the commutative match (Add y x). - // Same for (Add x x), for any x. - commutative = false - } - } - - // Recursively commute arguments. - a := make([][]string, len(s)) - for i, arg := range s { - a[i] = commute1(arg, cnt, arch) - } - - // Choose all possibilities from all args. - r := crossProduct(a) - - // If commutative, do that again with its two args reversed. - if commutative { - a[idx0], a[idx1] = a[idx1], a[idx0] - r = append(r, crossProduct(a)...) - } - - // Construct result. - for i, x := range r { - r[i] = prefix + "(" + x + ")" - } - return r -} - // varCount returns a map which counts the number of occurrences of // Value variables in the s-expression "match" and the Go expression "cond". func varCount(match, cond string) map[string]int { @@ -1469,22 +1437,6 @@ func varCount1(m string, cnt map[string]int) { } } -// crossProduct returns all possible values -// x[0][i] + " " + x[1][j] + " " + ... 
+ " " + x[len(x)-1][k] -// for all valid values of i, j, ..., k. -func crossProduct(x [][]string) []string { - if len(x) == 1 { - return x[0] - } - var r []string - for _, tail := range crossProduct(x[1:]) { - for _, first := range x[0] { - r = append(r, first+" "+tail) - } - } - return r -} - // normalizeWhitespace replaces 2+ whitespace sequences with a single space. func normalizeWhitespace(x string) string { x = strings.Join(strings.Fields(x), " ") @@ -1516,3 +1468,26 @@ func opIsCommutative(op string, arch arch) bool { } return false } + +func normalizeMatch(m string, arch arch) string { + if token.IsIdentifier(m) { + return m + } + op, typ, auxint, aux, args := extract(m) + if opIsCommutative(op, arch) { + if args[1] < args[0] { + args[0], args[1] = args[1], args[0] + } + } + s := new(strings.Builder) + fmt.Fprintf(s, "%s <%s> [%s] {%s}", op, typ, auxint, aux) + for _, arg := range args { + var prefix string + if i := strings.Index(arg, ":"); i >= 0 && token.IsIdentifier(arg[:i]) { + prefix = arg[:i+1] + arg = arg[i+1:] + } + fmt.Fprint(s, " ", prefix, normalizeMatch(arg, arch)) + } + return s.String() +} diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index dba61cf347..464e03ee41 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -11,7 +11,7 @@ func rewriteValue386(v *Value) bool { case Op386ADCL: return rewriteValue386_Op386ADCL_0(v) case Op386ADDL: - return rewriteValue386_Op386ADDL_0(v) || rewriteValue386_Op386ADDL_10(v) || rewriteValue386_Op386ADDL_20(v) + return rewriteValue386_Op386ADDL_0(v) || rewriteValue386_Op386ADDL_10(v) case Op386ADDLcarry: return rewriteValue386_Op386ADDLcarry_0(v) case Op386ADDLconst: @@ -105,7 +105,7 @@ func rewriteValue386(v *Value) bool { case Op386MOVBstoreconstidx1: return rewriteValue386_Op386MOVBstoreconstidx1_0(v) case Op386MOVBstoreidx1: - return rewriteValue386_Op386MOVBstoreidx1_0(v) || rewriteValue386_Op386MOVBstoreidx1_10(v) || rewriteValue386_Op386MOVBstoreidx1_20(v) + return rewriteValue386_Op386MOVBstoreidx1_0(v) case Op386MOVLload: return rewriteValue386_Op386MOVLload_0(v) case Op386MOVLloadidx1: @@ -113,7 +113,7 @@ func rewriteValue386(v *Value) bool { case Op386MOVLloadidx4: return rewriteValue386_Op386MOVLloadidx4_0(v) case Op386MOVLstore: - return rewriteValue386_Op386MOVLstore_0(v) || rewriteValue386_Op386MOVLstore_10(v) || rewriteValue386_Op386MOVLstore_20(v) + return rewriteValue386_Op386MOVLstore_0(v) || rewriteValue386_Op386MOVLstore_10(v) case Op386MOVLstoreconst: return rewriteValue386_Op386MOVLstoreconst_0(v) case Op386MOVLstoreconstidx1: @@ -165,7 +165,7 @@ func rewriteValue386(v *Value) bool { case Op386MOVWloadidx2: return rewriteValue386_Op386MOVWloadidx2_0(v) case Op386MOVWstore: - return rewriteValue386_Op386MOVWstore_0(v) || rewriteValue386_Op386MOVWstore_10(v) + return rewriteValue386_Op386MOVWstore_0(v) case Op386MOVWstoreconst: return rewriteValue386_Op386MOVWstoreconst_0(v) case Op386MOVWstoreconstidx1: @@ -173,7 +173,7 @@ func rewriteValue386(v *Value) bool { case Op386MOVWstoreconstidx2: return rewriteValue386_Op386MOVWstoreconstidx2_0(v) case Op386MOVWstoreidx1: - return rewriteValue386_Op386MOVWstoreidx1_0(v) || rewriteValue386_Op386MOVWstoreidx1_10(v) + return rewriteValue386_Op386MOVWstoreidx1_0(v) case Op386MOVWstoreidx2: return rewriteValue386_Op386MOVWstoreidx2_0(v) case Op386MULL: @@ -197,7 +197,7 @@ func rewriteValue386(v *Value) bool { case Op386NOTL: return rewriteValue386_Op386NOTL_0(v) case 
Op386ORL: - return rewriteValue386_Op386ORL_0(v) || rewriteValue386_Op386ORL_10(v) || rewriteValue386_Op386ORL_20(v) || rewriteValue386_Op386ORL_30(v) || rewriteValue386_Op386ORL_40(v) || rewriteValue386_Op386ORL_50(v) + return rewriteValue386_Op386ORL_0(v) || rewriteValue386_Op386ORL_10(v) case Op386ORLconst: return rewriteValue386_Op386ORLconst_0(v) case Op386ORLconstmodify: @@ -293,7 +293,7 @@ func rewriteValue386(v *Value) bool { case Op386SUBSSload: return rewriteValue386_Op386SUBSSload_0(v) case Op386XORL: - return rewriteValue386_Op386XORL_0(v) || rewriteValue386_Op386XORL_10(v) + return rewriteValue386_Op386XORL_0(v) case Op386XORLconst: return rewriteValue386_Op386XORLconst_0(v) case Op386XORLconstmodify: @@ -712,33 +712,20 @@ func rewriteValue386_Op386ADCL_0(v *Value) bool { // result: (ADCLconst [c] x f) for { f := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386ADCLconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(f) + return true } - c := v_1.AuxInt - v.reset(Op386ADCLconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(f) - return true - } - // match: (ADCL (MOVLconst [c]) x f) - // result: (ADCLconst [c] x f) - for { - f := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(Op386ADCLconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(f) - return true + break } return false } @@ -747,78 +734,46 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // result: (ADDLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386ADDLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(Op386ADDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (MOVLconst [c]) x) - // result: (ADDLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386ADDLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x)) // cond: d == 32-c // result: (ROLLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRLconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(Op386ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (SHRLconst [d] x) (SHLLconst [c] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL 
(SHLLconst x [c]) (SHRWconst x [d])) // cond: c < 16 && d == 16-c && t.Size() == 2 @@ -826,49 +781,27 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { + continue + } + v.reset(Op386ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL (SHLLconst x [c]) (SHRBconst x [d])) // cond: c < 8 && d == 8-c && t.Size() == 1 @@ -876,458 +809,251 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRBconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { + continue + } + v.reset(Op386ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL x (SHLLconst [3] y)) // result: (LEAL8 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ADDL (SHLLconst [3] y) x) - // result: (LEAL8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - v.reset(Op386LEAL8) - v.AddArg(x) - 
v.AddArg(y) - return true - } - return false -} -func rewriteValue386_Op386ADDL_10(v *Value) bool { // match: (ADDL x (SHLLconst [2] y)) // result: (LEAL4 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (SHLLconst [2] y) x) - // result: (LEAL4 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (SHLLconst [1] y)) // result: (LEAL2 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL2) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (SHLLconst [1] y) x) - // result: (LEAL2 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (ADDL y y)) // result: (LEAL2 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDL { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(Op386LEAL2) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - if y != v_1.Args[0] { - break - } - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (ADDL y y) x) - // result: (LEAL2 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - y := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (ADDL x y)) // result: (LEAL2 y x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(Op386LEAL2) + v.AddArg(y) + v.AddArg(x) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) - return true + break } - // match: (ADDL x (ADDL y x)) - // result: (LEAL2 y x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDL { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDL (ADDL x y) x) - // result: (LEAL2 y x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDL (ADDL y x) x) - 
// result: (LEAL2 y x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - return false -} -func rewriteValue386_Op386ADDL_20(v *Value) bool { // match: (ADDL (ADDLconst [c] x) y) // result: (LEAL1 [c] x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(Op386LEAL1) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL y (ADDLconst [c] x)) - // result: (LEAL1 [c] x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(Op386LEAL1) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - x := v_1.Args[0] - v.reset(Op386LEAL1) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } + return false +} +func rewriteValue386_Op386ADDL_10(v *Value) bool { // match: (ADDL x (LEAL [c] {s} y)) // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAL1 [c] {s} x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386LEAL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386LEAL { + continue + } + c := v_1.AuxInt + s := v_1.Aux + y := v_1.Args[0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - s := v_1.Aux - y := v_1.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (LEAL [c] {s} y) x) - // cond: x.Op != OpSB && y.Op != OpSB - // result: (LEAL1 [c] {s} x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { - break - } - c := v_0.AuxInt - s := v_0.Aux - y := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ADDLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ADDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ADDLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - 
v.reset(Op386ADDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLloadidx4 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLloadidx4 { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ADDLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ADDLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ADDLloadidx4 x [off] {sym} ptr idx mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLloadidx4 { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ADDLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (ADDL x (NEGL y)) // result: (SUBL x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386NEGL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386NEGL { + continue + } + y := v_1.Args[0] + v.reset(Op386SUBL) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386SUBL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (NEGL y) x) - // result: (SUBL x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386NEGL { - break - } - y := v_0.Args[0] - v.reset(Op386SUBL) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -1336,30 +1062,19 @@ func rewriteValue386_Op386ADDLcarry_0(v *Value) bool { // result: (ADDLconstcarry [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386ADDLconstcarry) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(Op386ADDLconstcarry) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDLcarry (MOVLconst [c]) x) - // result: (ADDLconstcarry [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386ADDLconstcarry) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -2017,49 +1732,28 @@ func rewriteValue386_Op386ADDSD_0(v *Value) bool { // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVSDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVSDload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if 
!(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + continue + } + v.reset(Op386ADDSDload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386ADDSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) - // result: (ADDSDload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVSDload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386ADDSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -2127,49 +1821,28 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVSSload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVSSload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + continue + } + v.reset(Op386ADDSSload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386ADDSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) - // result: (ADDSSload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVSSload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386ADDSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -2234,132 +1907,77 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { // result: (ANDLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386ANDLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(Op386ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDL (MOVLconst [c]) x) - // result: (ANDLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != 
Op386MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ANDLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ANDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ANDLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ANDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLloadidx4 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLloadidx4 { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ANDLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ANDLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ANDLloadidx4 x [off] {sym} ptr idx mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLloadidx4 { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ANDLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (ANDL x x) // result: x @@ -4003,39 +3621,21 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - y := v_0.Args[1] - x := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - v.reset(Op386LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL [c] {s} (ADDL y x)) - // cond: x.Op != OpSB && y.Op != OpSB - // result: (LEAL1 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL 
[off1] {sym1} (LEAL [off2] {sym2} x)) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) @@ -4161,49 +3761,29 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAL1 [c+d] {s} x y) - for { - c := v.AuxInt - s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} y (ADDLconst [d] x)) - // cond: is32Bit(c+d) && x.Op != OpSB - // result: (LEAL1 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(is32Bit(c+d) && x.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - d := v_1.AuxInt - x := v_1.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) // result: (LEAL2 [c] {s} x y) @@ -4211,36 +3791,21 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL2) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386LEAL2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} (SHLLconst [1] y) x) - // result: (LEAL2 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v.reset(Op386LEAL2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) // result: (LEAL4 [c] {s} x y) @@ -4248,36 +3813,21 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386LEAL4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} (SHLLconst [2] y) x) - // result: (LEAL4 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - v.reset(Op386LEAL4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) // result: (LEAL8 [c] {s} x y) @@ -4285,85 +3835,49 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if 
v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(Op386LEAL8) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} (SHLLconst [3] y) x) - // result: (LEAL8 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - v.reset(Op386LEAL8) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) - for { - off1 := v.AuxInt - sym1 := v.Aux - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [off1] {sym1} y (LEAL [off2] {sym2} x)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386LEAL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386LEAL { + continue + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true } - off2 := v_1.AuxInt - sym2 := v_1.Aux - x := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { - break - } - v.reset(Op386LEAL1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -4927,42 +4441,22 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVBloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(Op386MOVBloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBload [off] {sym} (ADDL idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVBloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVBloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBload [off] {sym} (SB) _) // cond: symIsRO(sym) @@ -4988,41 +4482,23 @@ func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := 
v.Args[0] - if v_0.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + v.reset(Op386MOVBloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) - // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) @@ -5030,41 +4506,23 @@ func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDLconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + v.reset(Op386MOVBloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) - // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -5224,46 +4682,24 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVBstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(Op386MOVBstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} (ADDL idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVBstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) // cond: x.Uses 
== 1 && clobber(x) @@ -5323,9 +4759,6 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVBstore_10(v *Value) bool { // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i] {s} p w mem) @@ -5355,6 +4788,9 @@ func rewriteValue386_Op386MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValue386_Op386MOVBstore_10(v *Value) bool { // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i] {s} p w mem) @@ -5653,45 +5089,25 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + v.reset(Op386MOVBstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) - // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) @@ -5699,45 +5115,25 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDLconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + v.reset(Op386MOVBstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) @@ -5746,122 +5142,34 @@ func 
rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) @@ -5870,125 +5178,34 @@ func 
rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool { - // match: (MOVBstoreidx1 [i] {s} idx p (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) 
mem)) // cond: x.Uses == 1 && clobber(x) @@ -5997,122 +5214,34 @@ func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRLconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRLconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem)) // cond: x.Uses == 1 && 
clobber(x) @@ -6121,126 +5250,35 @@ func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } - // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != Op386SHRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVBstoreidx1_20(v *Value) bool { // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 
[i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) @@ -6248,142 +5286,39 @@ func rewriteValue386_Op386MOVBstoreidx1_20(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := 
v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -6525,42 +5460,22 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVLloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(Op386MOVLloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLload [off] {sym} (ADDL idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVLloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVLloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLload [off] {sym} (SB) _) // cond: symIsRO(sym) @@ -6586,39 +5501,22 @@ func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { + continue + } + idx := v_1.Args[0] + v.reset(Op386MOVLloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - v.reset(Op386MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (SHLLconst [2] idx) ptr mem) - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(Op386MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) @@ -6626,41 +5524,23 @@ func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + v.reset(Op386MOVLloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) - 
// result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) @@ -6668,41 +5548,23 @@ func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDLconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + v.reset(Op386MOVLloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) - // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -6895,46 +5757,24 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVLstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off] {sym} (ADDL idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) // cond: y.Uses==1 && clobber(y) @@ -7011,9 +5851,6 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstore_10(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) // cond: y.Uses==1 && clobber(y) // result: (XORLmodify [off] {sym} ptr x mem) @@ -7039,38 +5876,12 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValue386_Op386MOVLstore_10(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) // 
cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ADDLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ADDL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ADDLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -7081,22 +5892,28 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) @@ -7130,35 +5947,6 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ANDLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ANDL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ANDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ANDLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -7169,55 +5957,32 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] 
|| mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ANDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ORLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ORL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ORLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -7228,55 +5993,32 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (XORLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386XORL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386XORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (XORLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -7287,26 +6029,29 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + 
v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386XORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } - return false -} -func rewriteValue386_Op386MOVLstore_20(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) @@ -7662,43 +6407,24 @@ func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { + continue + } + idx := v_1.Args[0] + val := v.Args[2] + v.reset(Op386MOVLstoreidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (SHLLconst [2] idx) ptr val mem) - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) @@ -7706,45 +6432,25 @@ func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) - // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) @@ -7752,45 +6458,25 @@ func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDLconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } @@ -7952,37 +6638,6 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] - if y.Op != Op386ADDL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ADDLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux @@ -7994,23 +6649,29 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386ADDLmodifyidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ADDLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(SUBL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) @@ -8046,40 +6707,6 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) // 
cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] - if y.Op != Op386ANDL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ANDLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { - // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux @@ -8091,58 +6718,33 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386ANDLmodifyidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ANDLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] - if y.Op != Op386ORL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ORLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux @@ -8154,58 +6756,36 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && 
clobber(l)) { + continue + } + v.reset(Op386ORLmodifyidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386ORLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] - if y.Op != Op386XORL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386XORLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux @@ -8217,23 +6797,29 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(Op386XORLmodifyidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386XORLmodifyidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) @@ -8490,42 +7076,22 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVSDloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(Op386MOVSDloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDload [off] {sym} (ADDL idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := 
v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVSDloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -8740,46 +7306,24 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVSDstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(Op386MOVSDstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstore [off] {sym} (ADDL idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVSDstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } @@ -9014,42 +7558,22 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVSSloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(Op386MOVSSloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSload [off] {sym} (ADDL idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVSSloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -9264,46 +7788,24 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVSSstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(Op386MOVSSstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstore [off] {sym} (ADDL idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVSSstoreidx1) 
- v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } @@ -9739,42 +8241,22 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVWloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(Op386MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off] {sym} (ADDL idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVWloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWload [off] {sym} (SB) _) // cond: symIsRO(sym) @@ -9800,39 +8282,22 @@ func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { + continue + } + idx := v_1.Args[0] + v.reset(Op386MOVWloadidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - v.reset(Op386MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (SHLLconst [1] idx) ptr mem) - // result: (MOVWloadidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(Op386MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) @@ -9840,41 +8305,23 @@ func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + v.reset(Op386MOVWloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - v.reset(Op386MOVWloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) - // result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - v.reset(Op386MOVWloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) - return true + break } // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) // result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) @@ -9882,41 +8329,23 @@ func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDLconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + v.reset(Op386MOVWloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - v.reset(Op386MOVWloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) - // result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(Op386MOVWloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -10149,46 +8578,24 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { if v_0.Op != Op386ADDL { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} (ADDL idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) @@ -10219,9 +8626,6 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVWstore_10(v *Value) bool { // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w0 mem) @@ -10615,43 +9019,24 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { + continue + } + idx := v_1.Args[0] + val := v.Args[2] + v.reset(Op386MOVWstoreidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - val := v.Args[2] - 
v.reset(Op386MOVWstoreidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (SHLLconst [1] idx) ptr val mem) - // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst || v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVWstoreidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) @@ -10659,45 +9044,25 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386ADDLconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + v.reset(Op386MOVWstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) - // result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) @@ -10705,45 +9070,25 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386ADDLconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + v.reset(Op386MOVWstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) @@ 
-10752,126 +9097,35 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstoreidx1_10(v *Value) bool { // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] 
w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) @@ -10879,142 +9133,39 @@ func rewriteValue386_Op386MOVWstoreidx1_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(Op386MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} idx p w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} idx p w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst 
{ - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -11146,132 +9297,77 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { // result: (MULLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386MULLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(Op386MULLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULL (MOVLconst [c]) x) - // result: (MULLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386MULLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (MULL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386MULLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386MULLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (MULLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386MULLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (MULL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLloadidx4 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLloadidx4 { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386MULLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MULL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (MULLloadidx4 x [off] {sym} ptr idx mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLloadidx4 { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386MULLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -11933,49 +10029,28 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVSDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVSDload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + continue + } + v.reset(Op386MULSDload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386MULSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) - // result: (MULSDload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVSDload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386MULSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -12043,49 +10118,28 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVSSload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVSSload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + continue + } + v.reset(Op386MULSSload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386MULSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) - // result: (MULSSload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVSSload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { - break - } - v.reset(Op386MULSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -12176,289 +10230,166 @@ func 
rewriteValue386_Op386NOTL_0(v *Value) bool { return false } func rewriteValue386_Op386ORL_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ORL x (MOVLconst [c])) // result: (ORLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386ORLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(Op386ORLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (ORL (MOVLconst [c]) x) - // result: (ORLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst [c] x) (SHRLconst [d] x)) + // match: ( ORL (SHLLconst [c] x) (SHRLconst [d] x)) // cond: d == 32-c // result: (ROLLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRLconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(Op386ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (ORL (SHRLconst [d] x) (SHLLconst [c] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst x [c]) (SHRWconst x [d])) + // match: ( ORL (SHLLconst x [c]) (SHRWconst x [d])) // cond: c < 16 && d == 16-c && t.Size() == 2 // result: (ROLWconst x [c]) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { + continue + } + v.reset(Op386ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (ORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) 
- return true - } - // match: (ORL (SHLLconst x [c]) (SHRBconst x [d])) + // match: ( ORL (SHLLconst x [c]) (SHRBconst x [d])) // cond: c < 8 && d == 8-c && t.Size() == 1 // result: (ROLBconst x [c]) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRBconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { + continue + } + v.reset(Op386ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ORLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } - // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ORLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386ORL_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLloadidx4 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLloadidx4 { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ORLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + 
v.AddArg(idx) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ORLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ORLloadidx4 x [off] {sym} ptr idx mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLloadidx4 { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386ORLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (ORL x x) // result: x @@ -12477,2646 +10408,232 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != Op386MOVBload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != Op386MOVBload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + s0 := v.Args[1^_i0] + if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { + continue + } + x1 := s0.Args[0] + if x1.Op != Op386MOVBload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != Op386MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, Op386MOVWload, typ.UInt16) - v.reset(OpCopy) - 
v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORL o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) for { _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != Op386ORL { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x0 := o0.Args[_i1] + if x0.Op != Op386MOVWload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + s0 := o0.Args[1^_i1] + if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { + continue + } + x1 := s0.Args[0] + if x1.Op != Op386MOVBload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + s1 := v.Args[1^_i0] + if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { + continue + } + x2 := s1.Args[0] + if x2.Op != Op386MOVBload { + continue + } + i3 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - _ = v.Args[1] 
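// Editor's sketch (not part of the generated diff): the matchers above replace each
// pair of hand-duplicated commutative rules with a single body that tries both
// operand orders. A minimal, self-contained illustration of that generated shape,
// using a hypothetical val type and rule rather than the ssa package's *Value:
package main

import "fmt"

type val struct {
	op   string
	aux  int64
	args []*val
}

// rewriteMulConst mirrors a generated match such as (MULL x (MOVLconst [c])):
// _i0 picks which operand plays x, 1^_i0 picks the other, a failed sub-match
// issues continue so the swapped order is still tried, and only after both
// orders have failed does control fall through to the next rule.
func rewriteMulConst(v *val) bool {
	for _i0 := 0; _i0 <= 1; _i0++ {
		x := v.args[_i0]
		c := v.args[1^_i0]
		if c.op != "const" {
			continue // try the swapped operand order before giving up
		}
		v.op = "mulconst"
		v.aux = c.aux
		v.args = []*val{x}
		return true
	}
	return false
}

func main() {
	v := &val{op: "mul", args: []*val{{op: "const", aux: 3}, {op: "var"}}}
	fmt.Println(rewriteMulConst(v), v.op, v.aux) // true mulconst 3
}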
- o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := o0.Args[1] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)) o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, Op386MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)) x0:(MOVWload [i0] {s} p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p 
:= x2.Args[0] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, Op386MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != Op386MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + s0 := v.Args[1^_i0] + if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { + continue + } + x1 := s0.Args[0] + if x1.Op != Op386MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } return false } -func rewriteValue386_Op386ORL_20(v *Value) bool { +func rewriteValue386_Op386ORL_10(v *Value) bool { b := v.Block - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] 
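// Editor's sketch (illustrative only): when a match contains nested commutative
// operations -- e.g. the ORL load-merging rules above, whose inner (ORL ...) and
// (MOVBloadidx1 ...) operands are themselves commutative -- the generator nests
// one loop per commutative op (_i0, _i1, _i2, ...). continue abandons only the
// current permutation, so the 2^3 = 8 previously hand-expanded variants of such
// a rule collapse into a single body:
package main

import "fmt"

// permutations lists the operand orders that three nested commute loops visit,
// mirroring the _i0/_i1/_i2 control flow of the generated matcher.
func permutations() [][3]int {
	var out [][3]int
	for _i0 := 0; _i0 <= 1; _i0++ {
		for _i1 := 0; _i1 <= 1; _i1++ {
			for _i2 := 0; _i2 <= 1; _i2++ {
				out = append(out, [3]int{_i0, _i1, _i2})
			}
		}
	}
	return out
}

func main() {
	p := permutations()
	fmt.Println(len(p), p) // 8 orderings: one loop body instead of eight copied rules
}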
- if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 
:= x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != 
Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break 
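// Editor's note (illustrative only): throughout the regenerated matchers the two
// operands of a commutative op are read as v.Args[_i0] and v.Args[1^_i0]; since
// _i0 is only ever 0 or 1, XOR with 1 selects the complementary slot. A tiny
// standalone demonstration of that indexing trick:
package main

import "fmt"

func main() {
	for _i0 := 0; _i0 <= 1; _i0++ {
		fmt.Println(_i0, 1^_i0) // prints "0 1" then "1 0": each pass swaps the roles
	}
}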
- } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386ORL_30(v *Value) bool { - b := v.Block - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || 
s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - 
s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != 
x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } 
- _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - 
i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386ORL_40(v *Value) bool { - b := v.Block - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := 
x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != 
Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[2] - if idx != x2.Args[0] || p != x2.Args[1] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - 
break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := 
s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != 
Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386ORL_50(v *Value) bool { - b := v.Block - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && 
clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) 
&& clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && 
mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - p := x2.Args[0] - idx := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[2] - idx := x2.Args[0] - p := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != Op386ORL { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x0 := o0.Args[_i1] + if x0.Op != Op386MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + p := x0.Args[_i2] + idx := x0.Args[1^_i2] + s0 := o0.Args[1^_i1] + if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { + continue + } + x1 := s0.Args[0] + if x1.Op != Op386MOVBloadidx1 { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + s1 := v.Args[1^_i0] + if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { + continue + } + x2 := s1.Args[0] + if x2.Op != Op386MOVBloadidx1 { + continue + } + i3 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[2] + for _i4 := 0; _i4 <= 1; _i4++ { + if p != x2.Args[_i4] || idx != x2.Args[1^_i4] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } + } + } + } + break } return false } @@ -17516,78 +13033,46 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // result: (XORLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(Op386XORLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt 
- v.reset(Op386XORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (MOVLconst [c]) x) - // result: (XORLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386XORLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL (SHLLconst [c] x) (SHRLconst [d] x)) // cond: d == 32-c // result: (ROLLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRLconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(Op386ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (SHRLconst [d] x) (SHLLconst [c] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL (SHLLconst x [c]) (SHRWconst x [d])) // cond: c < 16 && d == 16-c && t.Size() == 2 @@ -17595,49 +13080,27 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { + continue + } + v.reset(Op386ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL (SHLLconst x [c]) (SHRBconst x [d])) // cond: c < 8 && d == 8-c && t.Size() == 1 @@ -17645,154 +13108,85 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != Op386SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != Op386SHRBconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { + continue + } + 
v.reset(Op386ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386XORLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386XORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } - // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (XORLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386XORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386XORL_10(v *Value) bool { // match: (XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVLloadidx4 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != Op386MOVLloadidx4 { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386XORLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386XORLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (XORLloadidx4 x [off] {sym} ptr idx mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVLloadidx4 { - break - } - off := l.AuxInt - sym := l.Aux - mem := 
l.Args[2] - ptr := l.Args[0] - idx := l.Args[1] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(Op386XORLloadidx4) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (XORL x x) // result: (MOVLconst [0]) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 98619943d7..11bf714195 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -13,7 +13,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64ADCQconst: return rewriteValueAMD64_OpAMD64ADCQconst_0(v) case OpAMD64ADDL: - return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v) + return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) case OpAMD64ADDLconst: return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v) case OpAMD64ADDLconstmodify: @@ -23,7 +23,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64ADDLmodify: return rewriteValueAMD64_OpAMD64ADDLmodify_0(v) case OpAMD64ADDQ: - return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v) + return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) case OpAMD64ADDQcarry: return rewriteValueAMD64_OpAMD64ADDQcarry_0(v) case OpAMD64ADDQconst: @@ -301,13 +301,13 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVQi2f: return rewriteValueAMD64_OpAMD64MOVQi2f_0(v) case OpAMD64MOVQload: - return rewriteValueAMD64_OpAMD64MOVQload_0(v) || rewriteValueAMD64_OpAMD64MOVQload_10(v) + return rewriteValueAMD64_OpAMD64MOVQload_0(v) case OpAMD64MOVQloadidx1: return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v) case OpAMD64MOVQloadidx8: return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v) case OpAMD64MOVQstore: - return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v) + return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) case OpAMD64MOVQstoreconst: return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v) case OpAMD64MOVQstoreconstidx1: @@ -391,7 +391,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64NOTQ: return rewriteValueAMD64_OpAMD64NOTQ_0(v) case OpAMD64ORL: - return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v) + return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) case OpAMD64ORLconst: return rewriteValueAMD64_OpAMD64ORLconst_0(v) case OpAMD64ORLconstmodify: @@ -401,7 +401,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64ORLmodify: return rewriteValueAMD64_OpAMD64ORLmodify_0(v) case OpAMD64ORQ: - return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || 
rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v) + return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) case OpAMD64ORQconst: return rewriteValueAMD64_OpAMD64ORQconst_0(v) case OpAMD64ORQconstmodify: @@ -475,7 +475,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64SETBstore: return rewriteValueAMD64_OpAMD64SETBstore_0(v) case OpAMD64SETEQ: - return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v) + return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) case OpAMD64SETEQstore: return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v) case OpAMD64SETG: @@ -495,7 +495,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64SETLstore: return rewriteValueAMD64_OpAMD64SETLstore_0(v) case OpAMD64SETNE: - return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v) + return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) case OpAMD64SETNEstore: return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v) case OpAMD64SHLL: @@ -573,7 +573,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64XCHGQ: return rewriteValueAMD64_OpAMD64XCHGQ_0(v) case OpAMD64XORL: - return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v) + return rewriteValueAMD64_OpAMD64XORL_0(v) case OpAMD64XORLconst: return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v) case OpAMD64XORLconstmodify: @@ -583,7 +583,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64XORLmodify: return rewriteValueAMD64_OpAMD64XORLmodify_0(v) case OpAMD64XORQ: - return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v) + return rewriteValueAMD64_OpAMD64XORQ_0(v) case OpAMD64XORQconst: return rewriteValueAMD64_OpAMD64XORQconst_0(v) case OpAMD64XORQconstmodify: @@ -1161,40 +1161,23 @@ func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool { // result: (ADCQconst x [c] carry) for { carry := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ADCQconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(carry) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ADCQconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(carry) - return true - } - // match: (ADCQ (MOVQconst [c]) x carry) - // cond: is32Bit(c) - // result: (ADCQconst x [c] carry) - for { - carry := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(is32Bit(c)) { - break - } - 
v.reset(OpAMD64ADCQconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(carry) - return true + break } // match: (ADCQ x y (FlagEQ)) // result: (ADDQcarry x y) @@ -1236,78 +1219,46 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // result: (ADDLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(OpAMD64ADDLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64ADDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (MOVLconst [c]) x) - // result: (ADDLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64ADDLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d])) // cond: d==32-c // result: (ROLLconst x [c]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRLconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpAMD64ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (SHRLconst x [d]) (SHLLconst x [c])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL (SHLLconst x [c]) (SHRWconst x [d])) // cond: d==16-c && c < 16 && t.Size() == 2 @@ -1315,49 +1266,27 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { + continue + } + v.reset(OpAMD64ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - 
v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL (SHLLconst x [c]) (SHRBconst x [d])) // cond: d==8-c && c < 8 && t.Size() == 1 @@ -1365,405 +1294,221 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRBconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { + continue + } + v.reset(OpAMD64ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDL x (SHLLconst [3] y)) // result: (LEAL8 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL8) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAL8) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ADDL (SHLLconst [3] y) x) - // result: (LEAL8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAL8) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { // match: (ADDL x (SHLLconst [2] y)) // result: (LEAL4 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL4) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAL4) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (SHLLconst [2] y) x) - // result: (LEAL4 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAL4) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (SHLLconst [1] y)) // result: (LEAL2 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL2) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAL2) - v.AddArg(x) 
- v.AddArg(y) - return true - } - // match: (ADDL (SHLLconst [1] y) x) - // result: (LEAL2 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (ADDL y y)) // result: (LEAL2 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDL { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpAMD64LEAL2) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - if y != v_1.Args[0] { - break - } - v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (ADDL y y) x) - // result: (LEAL2 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDL { - break - } - y := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpAMD64LEAL2) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (ADDL x y)) // result: (LEAL2 y x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpAMD64LEAL2) + v.AddArg(y) + v.AddArg(x) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpAMD64LEAL2) - v.AddArg(y) - v.AddArg(x) - return true + break } - // match: (ADDL x (ADDL y x)) - // result: (LEAL2 y x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDL { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpAMD64LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDL (ADDL x y) x) - // result: (LEAL2 y x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDL { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpAMD64LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDL (ADDL y x) x) - // result: (LEAL2 y x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDL { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpAMD64LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { // match: (ADDL (ADDLconst [c] x) y) // result: (LEAL1 [c] x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL y (ADDLconst [c] x)) - // result: (LEAL1 [c] x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpAMD64LEAL1) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - x := v_1.Args[0] - v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { // match: (ADDL x (LEAL [c] {s} y)) // cond: x.Op != OpSB && 
y.Op != OpSB // result: (LEAL1 [c] {s} x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64LEAL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64LEAL { + continue + } + c := v_1.AuxInt + s := v_1.Aux + y := v_1.Args[0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAL1) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - s := v_1.Aux - y := v_1.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (LEAL [c] {s} y) x) - // cond: x.Op != OpSB && y.Op != OpSB - // result: (LEAL1 [c] {s} x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - c := v_0.AuxInt - s := v_0.Aux - y := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x (NEGL y)) // result: (SUBL x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NEGL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64NEGL { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64SUBL) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64SUBL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDL (NEGL y) x) - // result: (SUBL x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64NEGL { - break - } - y := v_0.Args[0] - v.reset(OpAMD64SUBL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ADDLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -2162,441 +1907,243 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // result: (ADDQconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = c 
+ v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (ADDQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d])) // cond: d==64-c // result: (ROLQconst x [c]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRQconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpAMD64ROLQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDQ x (SHLQconst [3] y)) // result: (LEAQ8 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDQ (SHLQconst [3] y) x) - // result: (LEAQ8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDQ x (SHLQconst [2] y)) // result: (LEAQ4 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDQ (SHLQconst [2] y) x) - // result: (LEAQ4 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDQ x (SHLQconst [1] y)) // result: (LEAQ2 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + continue + } + 
y := v_1.Args[0] + v.reset(OpAMD64LEAQ2) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ADDQ (SHLQconst [1] y) x) - // result: (LEAQ2 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool { // match: (ADDQ x (ADDQ y y)) // result: (LEAQ2 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDQ { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpAMD64LEAQ2) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - if y != v_1.Args[0] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDQ (ADDQ y y) x) - // result: (LEAQ2 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - y := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDQ x (ADDQ x y)) // result: (LEAQ2 y x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDQ { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpAMD64LEAQ2) + v.AddArg(y) + v.AddArg(x) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDQ x (ADDQ y x)) - // result: (LEAQ2 y x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQ { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDQ (ADDQ x y) x) - // result: (LEAQ2 y x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDQ (ADDQ y x) x) - // result: (LEAQ2 y x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) - return true + break } // match: (ADDQ (ADDQconst [c] x) y) // result: (LEAQ1 [c] x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDQ y (ADDQconst [c] x)) - // result: (LEAQ1 [c] x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDQconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpAMD64LEAQ1) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - x := v_1.Args[0] - v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + 
break } // match: (ADDQ x (LEAQ [c] {s} y)) // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAQ1 [c] {s} x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64LEAQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64LEAQ { + continue + } + c := v_1.AuxInt + s := v_1.Aux + y := v_1.Args[0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - s := v_1.Aux - y := v_1.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ADDQ (LEAQ [c] {s} y) x) - // cond: x.Op != OpSB && y.Op != OpSB - // result: (LEAQ1 [c] {s} x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - c := v_0.AuxInt - s := v_0.Aux - y := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { // match: (ADDQ x (NEGQ y)) // result: (SUBQ x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NEGQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64NEGQ { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64SUBQ) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64SUBQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDQ (NEGQ y) x) - // result: (SUBQ x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64NEGQ { - break - } - y := v_0.Args[0] - v.reset(OpAMD64SUBQ) - v.AddArg(x) - v.AddArg(y) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool { // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDQload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVQload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVQload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDQload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ADDQload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVQload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -2606,37 +2153,22 @@ func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool { // result: (ADDQconstcarry x [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - 
break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ADDQconstcarry) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDQcarry (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (ADDQconstcarry x [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ADDQconstcarry) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -3037,49 +2569,28 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVSDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVSDload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDSDload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ADDSDload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVSDload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -3169,49 +2680,28 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVSSload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVSSload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDSSload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ADDSSload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVSSload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ADDSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -3298,114 +2788,69 @@ func 
rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x) // result: (BTRL x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64NOTL { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - y := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTRL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y))) - // result: (BTRL x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NOTL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64NOTL { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SHLL { + continue + } + y := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpAMD64BTRL) + v.AddArg(x) + v.AddArg(y) + return true } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLL { - break - } - y := v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTRL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ANDL (MOVLconst [c]) x) // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 // result: (BTRLconst [log2uint32(^c)] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { - break - } - v.reset(OpAMD64BTRLconst) - v.AuxInt = log2uint32(^c) - v.AddArg(x) - return true - } - // match: (ANDL x (MOVLconst [c])) - // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 - // result: (BTRLconst [log2uint32(^c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { + continue + } + v.reset(OpAMD64BTRLconst) + v.AuxInt = log2uint32(^c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { - break - } - v.reset(OpAMD64BTRLconst) - v.AuxInt = log2uint32(^c) - v.AddArg(x) - return true + break } // match: (ANDL x (MOVLconst [c])) // result: (ANDLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(OpAMD64ANDLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDL (MOVLconst [c]) x) - // result: (ANDLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ANDL x x) // result: x @@ -3424,49 +2869,28 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVLload { + continue + } + off := l.AuxInt + 
sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ANDLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ANDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ANDLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ANDLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -3765,122 +3189,73 @@ func rewriteValueAMD64_OpAMD64ANDLmodify_0(v *Value) bool { func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x) // result: (BTRQ x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64NOTQ { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - y := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTRQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y))) - // result: (BTRQ x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NOTQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64NOTQ { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SHLQ { + continue + } + y := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpAMD64BTRQ) + v.AddArg(x) + v.AddArg(y) + return true } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLQ { - break - } - y := v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTRQ) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ANDQ (MOVQconst [c]) x) // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 // result: (BTRQconst [log2(^c)] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { - break - } - v.reset(OpAMD64BTRQconst) - v.AuxInt = log2(^c) - v.AddArg(x) - return true - } - // match: (ANDQ x (MOVQconst [c])) - // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 - // result: (BTRQconst [log2(^c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { + continue + } + v.reset(OpAMD64BTRQconst) + v.AuxInt = log2(^c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { - break - } - v.reset(OpAMD64BTRQconst) - v.AuxInt = log2(^c) - v.AddArg(x) - return true + break } // match: (ANDQ x (MOVQconst [c])) // cond: is32Bit(c) // result: (ANDQconst [c] x) for { _ = 
v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ANDQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ANDQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (ANDQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ANDQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ANDQ x x) // result: x @@ -3899,49 +3274,28 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { // result: (ANDQload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVQload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVQload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ANDQload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ANDQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ANDQload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVQload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ANDQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -10208,39 +9562,21 @@ func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool { if v_0.Op != OpAMD64ADDL { break } - y := v_0.Args[1] - x := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAL1) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL [c] {s} (ADDL y x)) - // cond: x.Op != OpSB && y.Op != OpSB - // result: (LEAL1 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDL { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(OpAMD64LEAL1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -10248,49 +9584,29 @@ func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool { // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAL1 [c+d] {s} x y) - for { - c := v.AuxInt - s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { - break - } - 
v.reset(OpAMD64LEAL1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} y (ADDLconst [d] x)) - // cond: is32Bit(c+d) && x.Op != OpSB - // result: (LEAL1 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDLconst { + continue + } + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(is32Bit(c+d) && x.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAL1) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - d := v_1.AuxInt - x := v_1.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { - break - } - v.reset(OpAMD64LEAL1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) // result: (LEAL2 [c] {s} x y) @@ -10298,36 +9614,21 @@ func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL2) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAL2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} (SHLLconst [1] y) x) - // result: (LEAL2 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAL2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) // result: (LEAL4 [c] {s} x y) @@ -10335,36 +9636,21 @@ func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL4) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAL4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL1 [c] {s} (SHLLconst [2] y) x) - // result: (LEAL4 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAL4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) // result: (LEAL8 [c] {s} x y) @@ -10372,36 +9658,21 @@ func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL8) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAL8) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: 
(LEAL1 [c] {s} (SHLLconst [3] y) x) - // result: (LEAL8 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAL8) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -10644,39 +9915,21 @@ func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - y := v_0.Args[1] - x := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAQ [c] {s} (ADDQ y x)) - // cond: x.Op != OpSB && y.Op != OpSB - // result: (LEAQ1 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - if !(x.Op != OpSB && y.Op != OpSB) { - break - } - v.reset(OpAMD64LEAQ1) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) @@ -10802,49 +10055,29 @@ func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAQ1 [c+d] {s} x y) - for { - c := v.AuxInt - s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { - break - } - v.reset(OpAMD64LEAQ1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) - // cond: is32Bit(c+d) && x.Op != OpSB - // result: (LEAQ1 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDQconst { + continue + } + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(is32Bit(c+d) && x.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - d := v_1.AuxInt - x := v_1.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { - break - } - v.reset(OpAMD64LEAQ1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) // result: (LEAQ2 [c] {s} x y) @@ -10852,36 +10085,21 @@ func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ2) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAQ2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) - // result: (LEAQ2 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { - break - } - y := 
v_0.Args[0] - v.reset(OpAMD64LEAQ2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) // result: (LEAQ4 [c] {s} x y) @@ -10889,36 +10107,21 @@ func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ4) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAQ4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) - // result: (LEAQ4 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAQ4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) // result: (LEAQ8 [c] {s} x y) @@ -10926,85 +10129,49 @@ func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { c := v.AuxInt s := v.Aux _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ8) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpAMD64LEAQ8) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) - // result: (LEAQ8 [c] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - v.reset(OpAMD64LEAQ8) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) - for { - off1 := v.AuxInt - sym1 := v.Aux - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { - break - } - v.reset(OpAMD64LEAQ1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64LEAQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64LEAQ { + continue + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true } - off2 := v_1.AuxInt - sym2 := v_1.Aux - x := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { - break - } - 
v.reset(OpAMD64LEAQ1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -11801,42 +10968,22 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBload [off] {sym} (ADDQ idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVBloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) @@ -11910,48 +11057,26 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDQconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: is32Bit(c+d) - // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) // cond: is32Bit(c+d) @@ -11960,48 +11085,26 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDQconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: is32Bit(c+d) - // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) - for { - c 
:= v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBloadidx1 [i] {s} p (MOVQconst [c]) mem) // cond: is32Bit(i+c) @@ -12010,44 +11113,24 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(i + c)) { + continue + } + v.reset(OpAMD64MOVBload) + v.AuxInt = i + c + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true } - c := v_1.AuxInt - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVBload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [i] {s} (MOVQconst [c]) p mem) - // cond: is32Bit(i+c) - // result: (MOVBload [i+c] {s} p mem) - for { - i := v.AuxInt - s := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - p := v.Args[1] - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVBload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true + break } return false } @@ -12472,46 +11555,24 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVBstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVBstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} (ADDQ idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVBstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) // cond: x0.Uses == 1 && clobber(x0) @@ -12545,10 +11606,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { - b := v.Block // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVLstore [i-3] {s} p (BSWAPL w) mem) @@ -12604,6 +11661,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { + b := v.Block + typ 
:= &b.Func.Config.Types // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) // result: (MOVQstore [i-7] {s} p (BSWAPQ w) mem) @@ -12949,11 +12011,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) @@ -12999,6 +12056,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -14373,42 +13433,22 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLload [off] {sym} (ADDQ idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVLloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) @@ -14457,11 +13497,6 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { - b := v.Block - config := b.Func.Config // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) // result: (MOVLf2i val) for { @@ -14482,6 +13517,11 @@ func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { v.AddArg(val) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { + b := v.Block + config := b.Func.Config // match: (MOVLload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVQconst [int64(read32(sym, off, config.BigEndian))]) @@ -14506,39 +13546,22 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := 
v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { + continue + } + idx := v_1.Args[0] + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) // result: (MOVLloadidx8 [c] {sym} ptr idx mem) @@ -14546,39 +13569,22 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { + continue + } + idx := v_1.Args[0] + v.reset(OpAMD64MOVLloadidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - v.reset(OpAMD64MOVLloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) - // result: (MOVLloadidx8 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(OpAMD64MOVLloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) @@ -14587,48 +13593,26 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDQconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: is32Bit(c+d) - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLloadidx1 
[c] {sym} ptr (ADDQconst [d] idx) mem) // cond: is32Bit(c+d) @@ -14637,48 +13621,26 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDQconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: is32Bit(c+d) - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVLloadidx1 [i] {s} p (MOVQconst [c]) mem) // cond: is32Bit(i+c) @@ -14687,44 +13649,24 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(i + c)) { + continue + } + v.reset(OpAMD64MOVLload) + v.AuxInt = i + c + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true } - c := v_1.AuxInt - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVLload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [i] {s} (MOVQconst [c]) p mem) - // cond: is32Bit(i+c) - // result: (MOVLload [i+c] {s} p mem) - for { - i := v.AuxInt - s := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - p := v.Args[1] - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVLload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true + break } return false } @@ -15113,52 +14055,30 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (MOVLstore [off] {sym} (ADDQ idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx 
:= v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVQstore [i-4] {s} p w mem) @@ -15418,41 +14338,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ADDLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64ADDL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ADDLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -15463,23 +14351,32 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (SUBLmodify [off] {sym} ptr x mem) @@ -15512,35 +14409,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ANDLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64ANDL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ANDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem) - // 
cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ANDLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -15551,55 +14419,32 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ANDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ORLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64ORL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ORLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -15610,55 +14455,32 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (XORLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64XORL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64XORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - 
v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (XORLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -15669,22 +14491,28 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64XORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) @@ -15715,9 +14543,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (BTRLmodify [off] {sym} ptr x mem) @@ -15863,6 +14688,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) @@ -17069,6 +15897,8 @@ func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { + b := v.Block + config := b.Func.Config // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: x @@ -17204,42 +16034,22 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQload [off] {sym} (ADDQ idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVQloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + 
break } // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) @@ -17308,11 +16118,6 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { v.AddArg(val) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVQload_10(v *Value) bool { - b := v.Block - config := b.Func.Config // match: (MOVQload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVQconst [int64(read64(sym, off, config.BigEndian))]) @@ -17337,39 +16142,22 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { + continue + } + idx := v_1.Args[0] + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) - // result: (MOVQloadidx8 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) @@ -17378,48 +16166,26 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDQconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: is32Bit(c+d) - // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) // cond: is32Bit(c+d) @@ -17428,48 +16194,26 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDQconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := 
v_1.AuxInt - idx := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: is32Bit(c+d) - // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVQloadidx1 [i] {s} p (MOVQconst [c]) mem) // cond: is32Bit(i+c) @@ -17478,44 +16222,24 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(i + c)) { + continue + } + v.reset(OpAMD64MOVQload) + v.AuxInt = i + c + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true } - c := v_1.AuxInt - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVQload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [i] {s} (MOVQconst [c]) p mem) - // cond: is32Bit(i+c) - // result: (MOVQload [i+c] {s} p mem) - for { - i := v.AuxInt - s := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - p := v.Args[1] - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVQload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true + break } return false } @@ -17737,46 +16461,24 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off] {sym} (ADDQ idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) @@ -17854,9 +16556,6 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) // cond: y.Uses==1 && clobber(y) // result: (ANDQmodify [off] {sym} ptr x mem) @@ -17882,6 +16581,9 @@ func 
rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) // cond: y.Uses==1 && clobber(y) // result: (ORQmodify [off] {sym} ptr x mem) @@ -17935,35 +16637,6 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ADDQmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64ADDQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ADDQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ADDQmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -17974,22 +16647,28 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ADDQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) @@ -18023,35 +16702,6 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ANDQmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64ANDQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ANDQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ANDQmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -18062,55 +16712,32 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op 
!= OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ANDQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ORQmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64ORQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ORQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ORQmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -18121,58 +16748,32 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64ORQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (XORQmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != OpAMD64XORQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break - } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64XORQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (XORQmodify [off] {sym} 
ptr x mem) for { off := v.AuxInt sym := v.Aux @@ -18183,22 +16784,28 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := y.Args[_i0] + if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + continue + } + _ = l.Args[1] + if ptr != l.Args[0] || mem != l.Args[1] { + continue + } + x := y.Args[1^_i0] + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + continue + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(OpAMD64XORQmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true + break } // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) @@ -18287,6 +16894,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) @@ -18432,9 +17042,6 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool { // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) @@ -19139,42 +17746,22 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVSDloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVSDloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDload [off] {sym} (ADDQ idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSDloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) // result: (MOVQi2f val) @@ -19489,46 +18076,24 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + 
continue + } + v.reset(OpAMD64MOVSDstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVSDstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstore [off] {sym} (ADDQ idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSDstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) // result: (MOVQstore [off] {sym} ptr val mem) @@ -19849,42 +18414,22 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVSSloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVSSloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSload [off] {sym} (ADDQ idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSSloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) // result: (MOVLi2f val) @@ -20199,46 +18744,24 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVSSstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVSSstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstore [off] {sym} (ADDQ idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSSstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) // result: (MOVLstore [off] {sym} ptr val mem) @@ -20941,42 +19464,22 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + 
_ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off] {sym} (ADDQ idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVWloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) @@ -21049,39 +19552,22 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { + continue + } + idx := v_1.Args[0] + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - idx := v_1.Args[0] - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) - // result: (MOVWloadidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) @@ -21090,48 +19576,26 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64ADDQconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: is32Bit(c+d) - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) // cond: is32Bit(c+d) @@ -21140,48 +19604,26 @@ func 
rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ADDQconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is32Bit(c + d)) { + continue + } + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: is32Bit(c+d) - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is32Bit(c + d)) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWloadidx1 [i] {s} p (MOVQconst [c]) mem) // cond: is32Bit(i+c) @@ -21190,44 +19632,24 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { i := v.AuxInt s := v.Aux mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(i + c)) { + continue + } + v.reset(OpAMD64MOVWload) + v.AuxInt = i + c + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true } - c := v_1.AuxInt - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVWload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [i] {s} (MOVQconst [c]) p mem) - // cond: is32Bit(i+c) - // result: (MOVWload [i+c] {s} p mem) - for { - i := v.AuxInt - s := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - p := v.Args[1] - if !(is32Bit(i + c)) { - break - } - v.reset(OpAMD64MOVWload) - v.AuxInt = i + c - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true + break } return false } @@ -21512,52 +19934,25 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { if v_0.Op != OpAMD64ADDQ { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } - // match: (MOVWstore [off] {sym} (ADDQ idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return 
true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w mem) @@ -21587,6 +19982,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w mem) @@ -22626,30 +21026,19 @@ func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { // result: (MULLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(OpAMD64MULLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64MULLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULL (MOVLconst [c]) x) - // result: (MULLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MULLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -23138,37 +21527,22 @@ func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { // result: (MULQconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64MULQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64MULQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (MULQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64MULQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -23679,49 +22053,28 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVSDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVSDload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64MULSDload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (MULSDload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVSDload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := 
l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -23811,49 +22164,28 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVSSload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVSSload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64MULSSload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (MULSSload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVSSload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSSload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -24084,154 +22416,92 @@ func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // match: (ORL (SHLL (MOVLconst [1]) y) x) // result: (BTSL x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTSL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL x (SHLL (MOVLconst [1]) y)) - // result: (BTSL x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpAMD64BTSL) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVLconst || v_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTSL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (MOVLconst [c]) x) // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 // result: (BTSLconst [log2uint32(c)] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTSLconst) - v.AuxInt = log2uint32(c) - v.AddArg(x) - return true - } - // match: (ORL x (MOVLconst [c])) - // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTSLconst [log2uint32(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { + continue + } + 
v.reset(OpAMD64BTSLconst) + v.AuxInt = log2uint32(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTSLconst) - v.AuxInt = log2uint32(c) - v.AddArg(x) - return true + break } // match: (ORL x (MOVLconst [c])) // result: (ORLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(OpAMD64ORLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (MOVLconst [c]) x) - // result: (ORLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64ORLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) // cond: d==32-c // result: (ROLLconst x [c]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRLconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpAMD64ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORL (SHLLconst x [c]) (SHRWconst x [d])) // cond: d==16-c && c < 16 && t.Size() == 2 @@ -24239,2340 +22509,748 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { + continue + } + v.reset(OpAMD64ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (ORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 16-c && c < 16 && 
t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // match: (ORL (SHLLconst x [c]) (SHRBconst x [d])) // cond: d==8-c && c < 8 && t.Size() == 1 // result: (ROLBconst x [c]) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRBconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { + continue + } + v.reset(OpAMD64ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) // result: (ROLL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGQ { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64ROLL) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst 
|| v_1_1_0_0_0.AuxInt != -32 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y)))) - // result: (ROLL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 32 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGQ { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -32 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRL { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y)) - // result: (ROLL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGQ { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGQ { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -32 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y)) - // result: (ROLL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 32 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -32 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 31 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRL { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if 
v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) // result: (ROLL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGL { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64ROLL) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y)))) - // result: (ROLL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 32 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGL { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -32 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRL { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - 
v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y)) - // result: (ROLL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGL { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGL { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -32 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y)) - // result: (ROLL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 32 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGL { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -32 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 31 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRL { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLL) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) // result: (RORL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if 
v_1_1_0_0.Op != OpAMD64NEGQ { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64RORL) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y)))) - // result: (RORL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRL { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 32 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGQ { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -32 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLL { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y)) - // result: (RORL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGQ { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGQ { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -32 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRL { - break - } - _ = 
v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y)) - // result: (RORL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 32 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -32 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 31 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) // result: (RORL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGL { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64RORL) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64RORL) - 
v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y)))) - // result: (RORL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRL { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 32 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGL { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -32 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLL { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y)) - // result: (RORL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGL { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGL { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -32 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y)) - // result: (RORL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 32 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGL { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -32 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 31 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORL) - 
v.AddArg(x) - v.AddArg(y) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) // cond: v.Type.Size() == 2 // result: (ROLW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRW { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGQ { + continue + } + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -16 { + continue + } + v_1_0_1_0_0 := v_1_0_1_0.Args[0] + if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 16 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGQ { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -16 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { + continue + } + v.reset(OpAMD64ROLW) + v.AddArg(x) + v.AddArg(y) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRW { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ { - break - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -16 { - break - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 16 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -16 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 16 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGQ { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -16 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 15 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRW { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGQ { - break - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpAMD64ADDQconst || v_1_1_1_0.AuxInt != -16 { - break - } - v_1_1_1_0_0 := v_1_1_1_0.Args[0] - if v_1_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_1_0_0.AuxInt != 15 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { - // match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15]))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRW { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGQ { - break - } - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpAMD64ADDQconst || v_0_0_1_0.AuxInt != -16 { - break - } - v_0_0_1_0_0 := v_0_0_1_0.Args[0] - if v_0_0_1_0_0.Op != OpAMD64ANDQconst || v_0_0_1_0_0.AuxInt != 15 { - break - } - y := v_0_0_1_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 16 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGQ { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -16 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 15 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15]))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 16 { - break - } - v_0_0_0_0 := 
v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -16 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 15 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRW { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGQ { - break - } - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpAMD64ADDQconst || v_0_1_1_0.AuxInt != -16 { - break - } - v_0_1_1_0_0 := v_0_1_1_0.Args[0] - if v_0_1_1_0_0.Op != OpAMD64ANDQconst || v_0_1_1_0_0.AuxInt != 15 || y != v_0_1_1_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) // cond: v.Type.Size() == 2 // result: (ROLW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRW { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGL { + continue + } + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -16 { + continue + } + v_1_0_1_0_0 := v_1_0_1_0.Args[0] + if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 16 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGL { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -16 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { + continue + } + v.reset(OpAMD64ROLW) + v.AddArg(x) + v.AddArg(y) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRW { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL { - break - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -16 { - break - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { 
- break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 16 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -16 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 16 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGL { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -16 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 15 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRW { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGL { - break - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpAMD64ADDLconst || v_1_1_1_0.AuxInt != -16 { - break - } - v_1_1_1_0_0 := v_1_1_1_0.Args[0] - if v_1_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_1_0_0.AuxInt != 15 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15]))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRW { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGL { - break - } - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpAMD64ADDLconst || v_0_0_1_0.AuxInt != -16 { - break - } - v_0_0_1_0_0 := v_0_0_1_0.Args[0] - if v_0_0_1_0_0.Op != OpAMD64ANDLconst || v_0_0_1_0_0.AuxInt != 15 { - break - } - y := v_0_0_1_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 16 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGL { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -16 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 15 || y != v_0_1_0_0_0_0.Args[0] { - break - } 
- v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15]))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 16 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGL { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -16 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 15 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRW { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGL { - break - } - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpAMD64ADDLconst || v_0_1_1_0.AuxInt != -16 { - break - } - v_0_1_1_0_0 := v_0_1_1_0.Args[0] - if v_0_1_1_0_0.Op != OpAMD64ANDLconst || v_0_1_1_0_0.AuxInt != 15 || y != v_0_1_1_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64ROLW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) // cond: v.Type.Size() == 2 // result: (RORW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLL { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -16 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { + continue + } + v.reset(OpAMD64RORW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGQ { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -16 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - break - } - 
v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15]))) - // cond: v.Type.Size() == 2 - // result: (RORW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64NEGQ { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64ADDQconst || v_0_1_0.AuxInt != -16 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0.AuxInt != 15 { - break - } - y := v_0_1_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) // cond: v.Type.Size() == 2 // result: (RORW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLL { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGL { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -16 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { + continue + } + v.reset(OpAMD64RORW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGL { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -16 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15]))) - // cond: v.Type.Size() == 2 - // result: (RORW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64NEGL { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64ADDLconst || v_0_1_0.AuxInt != -16 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0.AuxInt != 15 { - break - } - y := v_0_1_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { - break - } - v.reset(OpAMD64RORW) - v.AddArg(x) - v.AddArg(y) - return true - } - return 
false -} -func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool { // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) // cond: v.Type.Size() == 1 // result: (ROLB x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRB { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGQ { + continue + } + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -8 { + continue + } + v_1_0_1_0_0 := v_1_0_1_0.Args[0] + if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 8 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGQ { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -8 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { + continue + } + v.reset(OpAMD64ROLB) + v.AddArg(x) + v.AddArg(y) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRB { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ { - break - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -8 { - break - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 8 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -8 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != 
OpAMD64ANDQconst || v_0_1.AuxInt != 7 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 8 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGQ { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -8 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 7 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRB { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGQ { - break - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpAMD64ADDQconst || v_1_1_1_0.AuxInt != -8 { - break - } - v_1_1_1_0_0 := v_1_1_1_0.Args[0] - if v_1_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_1_0_0.AuxInt != 7 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7]))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRB { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGQ { - break - } - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpAMD64ADDQconst || v_0_0_1_0.AuxInt != -8 { - break - } - v_0_0_1_0_0 := v_0_0_1_0.Args[0] - if v_0_0_1_0_0.Op != OpAMD64ANDQconst || v_0_0_1_0_0.AuxInt != 7 { - break - } - y := v_0_0_1_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 8 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGQ { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -8 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 7 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7]))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 8 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -8 { - break - } - 
v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 7 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRB { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGQ { - break - } - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpAMD64ADDQconst || v_0_1_1_0.AuxInt != -8 { - break - } - v_0_1_1_0_0 := v_0_1_1_0.Args[0] - if v_0_1_1_0_0.Op != OpAMD64ANDQconst || v_0_1_1_0_0.AuxInt != 7 || y != v_0_1_1_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) // cond: v.Type.Size() == 1 // result: (ROLB x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDL { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRB { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGL { + continue + } + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -8 { + continue + } + v_1_0_1_0_0 := v_1_0_1_0.Args[0] + if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBLcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 8 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGL { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -8 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { + continue + } + v.reset(OpAMD64ROLB) + v.AddArg(x) + v.AddArg(y) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRB { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL { - break - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -8 { - break - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBLcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 8 { - break - } - 
v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -8 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDL { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBLcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 8 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGL { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -8 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 7 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRB { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGL { - break - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpAMD64ADDLconst || v_1_1_1_0.AuxInt != -8 { - break - } - v_1_1_1_0_0 := v_1_1_1_0.Args[0] - if v_1_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_1_0_0.AuxInt != 7 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7]))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRB { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGL { - break - } - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpAMD64ADDLconst || v_0_0_1_0.AuxInt != -8 { - break - } - v_0_0_1_0_0 := v_0_0_1_0.Args[0] - if v_0_0_1_0_0.Op != OpAMD64ANDLconst || v_0_0_1_0_0.AuxInt != 7 { - break - } - y := v_0_0_1_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBLcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 8 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGL { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -8 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 7 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 7 || y != 
v_1_1.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7]))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDL { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBLcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 8 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGL { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -8 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 7 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRB { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGL { - break - } - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpAMD64ADDLconst || v_0_1_1_0.AuxInt != -8 { - break - } - v_0_1_1_0_0 := v_0_1_1_0.Args[0] - if v_0_1_1_0_0.Op != OpAMD64ANDLconst || v_0_1_1_0_0.AuxInt != 7 || y != v_0_1_1_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64ROLB) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) // cond: v.Type.Size() == 1 // result: (RORB x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRB { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRB { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLL { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -8 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { + continue + } + v.reset(OpAMD64RORB) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGQ { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -8 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7]))) - // cond: v.Type.Size() == 1 - // result: 
(RORB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64NEGQ { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64ADDQconst || v_0_1_0.AuxInt != -8 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0.AuxInt != 7 { - break - } - y := v_0_1_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRB { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) // cond: v.Type.Size() == 1 // result: (RORB x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRB { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRB { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHLL { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGL { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -8 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { + continue + } + v.reset(OpAMD64RORB) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGL { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -8 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7]))) - // cond: v.Type.Size() == 1 - // result: (RORB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64NEGL { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64ADDLconst || v_0_1_0.AuxInt != -8 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0.AuxInt != 7 { - break - } - y := v_0_1_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRB { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { - break - } - v.reset(OpAMD64RORB) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORL x x) // result: x @@ -26591,4227 +23269,702 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { // 
result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVWload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVWload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] 
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpAMD64SHLLconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORL { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := or.Args[_i1] + if s0.Op != OpAMD64SHLLconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && 
clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: 
@mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt 
+ s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 
&& mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = 
mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVWloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) 
sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && 
x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := 
b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, 
OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - 
v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - 
v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - 
v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - 
v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} 
-func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - 
return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpAMD64SHLLconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORL { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s0 := or.Args[_i2] + if s0.Op != OpAMD64SHLLconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(idx) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) for { _ = v.Args[1] - x1 
:= v.Args[0] - if x1.Op != OpAMD64MOVBload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = 8 + v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) for { _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r1 := v.Args[_i0] + if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVWload { + continue + } + i1 
:= x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + continue + } + r0 := sh.Args[0] + if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVWload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && 
s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpAMD64SHLLconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORL { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := or.Args[_i1] + if s1.Op != OpAMD64SHLLconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) + v2.AuxInt = 8 + v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := 
x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := 
or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = 8 + v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx 
mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - 
v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) for { _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r1 := v.Args[_i0] + if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVWloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + continue + } + r0 := sh.Args[0] + if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != 
OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } return false } -func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool { +func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] 
- if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, 
OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && 
clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] 
(MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 
&& s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] 
x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - 
v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, 
typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = 
j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := 
b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 
&& x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpAMD64SHLLconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORL { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s1 := or.Args[_i2] + if s1.Op != OpAMD64SHLLconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) + v2.AuxInt = 8 + v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(idx) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ORLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ORLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -31005,7 +24158,7 @@ func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool { v.AddArg(mem) return true } - // match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // result: ( ORL x (MOVLf2i y)) for { off := v.AuxInt @@ -31086,1012 +24239,328 @@ func rewriteValueAMD64_OpAMD64ORLmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ORQ (SHLQ (MOVQconst [1]) y) x) // result: (BTSQ x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQ { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTSQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ x (SHLQ (MOVQconst [1]) y)) - // result: (BTSQ x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQ { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpAMD64BTSQ) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVQconst || v_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTSQ) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORQ (MOVQconst [c]) x) // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 // result: (BTSQconst [log2(c)] x) - for { - x := v.Args[1] - 
v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTSQconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (ORQ x (MOVQconst [c])) - // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTSQconst [log2(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { + continue + } + v.reset(OpAMD64BTSQconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTSQconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break } // match: (ORQ x (MOVQconst [c])) // cond: is32Bit(c) // result: (ORQconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ORQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (ORQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ORQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) // cond: d==64-c // result: (ROLQconst x [c]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRQconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpAMD64ROLQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) // result: (ROLQ x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQ { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDQ { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRQ { + continue 
+ } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBQcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGQ { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64ROLQ) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRQ { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBQcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y)))) - // result: (ROLQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQ { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBQcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 64 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGQ { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -64 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRQ { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { - // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y)) - // result: (ROLQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRQ { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGQ { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBQcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPQconst 
|| v_0_1_0.AuxInt != 64 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGQ { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -64 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y)) - // result: (ROLQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBQcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 64 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -64 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 63 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRQ { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) // result: (ROLQ x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQ { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDQ { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHRQ { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBQcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGL { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64ROLQ) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHRQ { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBQcarrymask { - break - } - v_1_1_0 := 
v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y)))) - // result: (ROLQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQ { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBQcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 64 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGL { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -64 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHRQ { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y)) - // result: (ROLQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHRQ { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGL { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBQcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 64 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGL { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -64 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y)) - // result: (ROLQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBQcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 64 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGL { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -64 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if 
v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 63 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHRQ { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64ROLQ) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) // result: (RORQ x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRQ { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDQ { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHLQ { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBQcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGQ { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64RORQ) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLQ { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBQcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y)))) - // result: (RORQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQ { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBQcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 64 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGQ { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -64 { - break - } - v_1_0_0_0_0_0 := 
v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLQ { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y)) - // result: (RORQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGQ { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBQcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 64 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGQ { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -64 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y)) - // result: (RORQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBQcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 64 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -64 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 63 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) // result: (RORQ x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHRQ { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64ANDQ { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpAMD64SHLQ { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := 
v_1_0.Args[1] + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpAMD64SBBQcarrymask { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAMD64NEGL { + continue + } + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 { + continue + } + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { + continue + } + v.reset(OpAMD64RORQ) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLQ { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SBBQcarrymask { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - break - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 { - break - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y)))) - // result: (RORQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQ { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQ { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SBBQcarrymask { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 64 { - break - } - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAMD64NEGL { - break - } - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -64 { - break - } - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLQ { - break - } - _ = v_1_1.Args[1] - if x != v_1_1.Args[0] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y)) - // result: (RORQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64NEGL { - break - } - y := v_0_0_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SBBQcarrymask { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 64 { - break - } - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAMD64NEGL { - break - } - v_0_1_0_0_0 := v_0_1_0_0.Args[0] 
- if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -64 { - break - } - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y)) - // result: (RORQ x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDQ { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SBBQcarrymask { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 64 { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64NEGL { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -64 { - break - } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 63 { - break - } - y := v_0_0_0_0_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break - } - _ = v_0_1.Args[1] - x := v_0_1.Args[0] - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQ { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] || y != v_1.Args[1] { - break - } - v.reset(OpAMD64RORQ) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ORQ x x) // result: x @@ -32110,7693 +24579,1194 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && 
clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVWload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVWload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == 
i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVLload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVLload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 
&& x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := or.Args[_i1] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if 
s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 
== 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVWload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := or.Args[_i1] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVWload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload 
[i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // 
match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx 
mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != 
nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVWloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := 
v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != 
OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := 
x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpAMD64MOVLloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVLloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && 
sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != 
nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, 
OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - 
v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - 
return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) 
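// The surrounding hunks show the new shape of the generated matchers for
// commutative ops such as ORQ: rather than emitting one copy of the rule per
// argument order (2^n near-identical variants when a pattern contains n
// commutative ops or swappable p/idx pairs), rulegen now wraps a single body
// in nested `for _iN := 0; _iN <= 1; _iN++` loops, reads the two operands as
// args[_iN] and args[1^_iN], and turns each failed check into `continue` so
// the remaining permutations are still tried before the matcher falls through
// to the next rule. The sketch below is a minimal, self-contained illustration
// of that matching idea; the toy `value` type and the `matchOrOfLoads` helper
// are invented for this example and are not part of the compiler.
package main

import "fmt"

// value is a stand-in for an SSA value: an opcode, an integer aux field
// (used here as a load offset), and its arguments.
type value struct {
	op   string
	aux  int64
	args []*value
}

// matchOrOfLoads reports whether v is (OR load load) with adjacent offsets,
// in either operand order, using the same _i0 / 1^_i0 trick as the
// generated code.
func matchOrOfLoads(v *value) (lo, hi *value, ok bool) {
	if v.op != "OR" || len(v.args) != 2 {
		return nil, nil, false
	}
	for _i0 := 0; _i0 <= 1; _i0++ {
		x0 := v.args[_i0]   // candidate low half
		x1 := v.args[1^_i0] // the other operand, candidate high half
		if x0.op != "Load" || x1.op != "Load" {
			continue // a failed check tries the swapped order, not the next rule
		}
		if x1.aux != x0.aux+1 {
			continue // offsets not adjacent in this orientation
		}
		return x0, x1, true
	}
	// Only after every permutation fails does control move on to the next rule.
	return nil, nil, false
}

func main() {
	lo := &value{op: "Load", aux: 8}
	hi := &value{op: "Load", aux: 9}
	// One loop-based matcher accepts both operand orders, which is why the
	// hand-expanded variants in the old generated code can be deleted.
	for _, v := range []*value{
		{op: "OR", args: []*value{lo, hi}},
		{op: "OR", args: []*value{hi, lo}},
	} {
		if a, b, ok := matchOrOfLoads(v); ok {
			fmt.Println("matched loads at offsets", a.aux, b.aux)
		}
	}
}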
- v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y 
s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) 
s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 
16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; 
_i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s0 := or.Args[_i2] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(idx) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := 
v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if 
x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem 
:= x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := 
v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := 
v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := 
x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst 
{ - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != 
OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVWloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s0 := or.Args[_i2] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(idx) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = 8 + v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - i1 := x1.AuxInt - s := 
x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) for { _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r1 := v.Args[_i0] + if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVWload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + continue + } + r0 := sh.Args[0] + if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVWload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) + v1.AuxInt = i0 + v1.Aux = s + 
v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) for { _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r1 := v.Args[_i0] + if r1.Op != OpAMD64BSWAPL { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVLload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + continue + } + r0 := sh.Args[0] + if r0.Op != OpAMD64BSWAPL { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVLload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && 
x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpAMD64SHLQconst { + continue 
+ } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := or.Args[_i1] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) + v2.AuxInt = 8 + v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == 
j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + r0 := s0.Args[0] + if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVWload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := or.Args[_i1] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + r1 := s1.Args[0] + if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVWload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) + v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 
== 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != 
OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) 
&& clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = 8 + v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 
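(Editorial aside, not part of the patch: the added hunk above shows the shape all of the regenerated matchers now take. Instead of one hand-expanded matcher per operand ordering, the generated code loops over the two possible orders of a commutative op, picks the complementary argument with `1^_i`, and uses `continue` to fall through to the next ordering rather than `break`ing out of the rule. A minimal standalone Go sketch of that indexing pattern follows; the names `value` and `matchAddConst` are illustrative only and do not appear in the compiler.)

package main

import "fmt"

// value is a toy stand-in for a two-operand SSA value.
type value struct {
	op   string
	args [2]int
}

// matchAddConst reports whether v is an "add" with a constant 1 operand,
// accepting either argument order, mirroring the _i0 / 1^_i0 loops above.
func matchAddConst(v value) (other int, ok bool) {
	if v.op != "add" {
		return 0, false
	}
	for _i0 := 0; _i0 <= 1; _i0++ {
		x := v.args[_i0]   // candidate for the constant operand
		y := v.args[1^_i0] // the complementary operand
		if x != 1 {
			continue // try the swapped order instead of giving up
		}
		return y, true
	}
	return 0, false
}

func main() {
	fmt.Println(matchAddConst(value{op: "add", args: [2]int{7, 1}})) // 7 true
	fmt.Println(matchAddConst(value{op: "add", args: [2]int{1, 7}})) // 7 true
	fmt.Println(matchAddConst(value{op: "mul", args: [2]int{1, 7}})) // 0 false
}

(End of aside; the remaining hunks below delete the previously hand-expanded permutations that the loops make redundant.)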
== i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - 
v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break 
- } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) for { _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r1 := v.Args[_i0] + if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVWloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + continue + } + r0 := sh.Args[0] + if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = 
v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - 
break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) 
sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) for { _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r1 := v.Args[_i0] + if r1.Op != OpAMD64BSWAPL { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVLloadidx1 { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + continue + } + r0 := sh.Args[0] + if r0.Op != OpAMD64BSWAPL { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVLloadidx1 { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = 
x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - 
v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 
[i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != 
OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := 
or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := 
x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - 
j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) 
&& clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p 
idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - 
v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, 
OpAMD64MOVWloadidx1, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s1 := or.Args[_i2] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) + v2.AuxInt = 8 + v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(idx) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, 
OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - 
} - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && 
r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && 
clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := 
x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: 
i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && 
clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - 
break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y 
s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpAMD64SHLQconst { + continue + } + j0 := s0.AuxInt + r0 := s0.Args[0] + if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + continue + } + x0 := r0.Args[0] + if x0.Op != OpAMD64MOVWloadidx1 { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpAMD64ORQ { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s1 := or.Args[_i2] + if s1.Op != OpAMD64SHLQconst { + continue + } + j1 := s1.AuxInt + r1 := s1.Args[0] + if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + continue + } + x1 := r1.Args[0] + if x1.Op != OpAMD64MOVWloadidx1 { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(idx) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORQload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVQload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVQload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ORQload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ORQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (ORQload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if 
l.Op != OpAMD64MOVQload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ORQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -39986,7 +25956,7 @@ func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool { v.AddArg(mem) return true } - // match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // result: ( ORQ x (MOVQf2i y)) for { off := v.AuxInt @@ -42492,101 +28462,59 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { b := v.Block // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) // result: (SETAE (BTL x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x))) - // result: (SETAE (BTL x y)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) // result: (SETAE (BTQ x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) - // result: (SETAE (BTQ x y)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SETEQ 
(TESTLconst [c] x)) // cond: isUint32PowerOfTwo(c) @@ -42631,51 +28559,30 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // match: (SETEQ (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) // result: (SETAE (BTQconst [log2(c)] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst { - break - } - c := v_0_0.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTQ x (MOVQconst [c]))) - // cond: isUint64PowerOfTwo(c) - // result: (SETAE (BTQconst [log2(c)] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_0_1.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _))) // result: (SETNE (CMPLconst [0] s)) @@ -42713,339 +28620,193 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { - b := v.Block // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) // cond: z1==z2 // result: (SETAE (BTQconst [63] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) - // cond: z1==z2 - // result: (SETAE (BTQconst [63] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) // cond: z1==z2 // result: (SETAE (BTQconst [31] x)) - for { - v_0 := v.Args[0] - 
if v_0.Op != OpAMD64TESTL { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) - // cond: z1==z2 - // result: (SETAE (BTQconst [31] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) // cond: z1==z2 // result: (SETAE (BTQconst [0] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) - // cond: z1==z2 - // result: (SETAE (BTQconst [0] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { + b := v.Block // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) // cond: z1==z2 // result: (SETAE (BTLconst [0] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - 
break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) - // cond: z1==z2 - // result: (SETAE (BTLconst [0] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2)) // cond: z1==z2 // result: (SETAE (BTQconst [63] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] x))) - // cond: z1==z2 - // result: (SETAE (BTQconst [63] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true + break } - return false -} -func rewriteValueAMD64_OpAMD64SETEQ_20(v *Value) bool { - b := v.Block // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2)) // cond: z1==z2 // result: (SETAE (BTLconst [31] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETEQ (TESTL z2 z1:(SHRLconst [31] x))) - // cond: z1==z2 - // result: (SETAE (BTLconst [31] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + x := z1.Args[0] + 
z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETEQ (InvertFlags x)) // result: (SETEQ x) @@ -43120,38 +28881,6 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { b := v.Block // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) // result: (SETAEstore [off] {sym} ptr (BTL x y) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLL { - break - } - x := v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) - // result: (SETAEstore [off] {sym} ptr (BTL x y) mem) for { off := v.AuxInt sym := v.Aux @@ -43162,61 +28891,32 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpAMD64SHLL { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { + continue + } + y := v_1.Args[1^_i0] + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64MOVLconst || v_1_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLQ { - break - } - x := v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) - // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem) for { off := v.AuxInt sym := v.Aux @@ -43227,26 +28927,29 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpAMD64SHLQ { + 
continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { + continue + } + y := v_1.Args[1^_i0] + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64MOVQconst || v_1_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem) // cond: isUint32PowerOfTwo(c) @@ -43307,38 +29010,6 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) // cond: isUint64PowerOfTwo(c) // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVQconst { - break - } - c := v_1_0.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) - // cond: isUint64PowerOfTwo(c) - // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43349,25 +29020,28 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { break } _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpAMD64MOVQconst { + continue + } + c := v_1_0.AuxInt + x := v_1.Args[1^_i0] + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - c := v_1_1.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem) @@ -43421,49 +29095,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { - b := v.Block // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := 
z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem) - // cond: z1==z2 - // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43474,69 +29108,36 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem) - // cond: z1==z2 - // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43547,69 +29148,36 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 
31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem) - // cond: z1==z2 - // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43620,69 +29188,41 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem) - // cond: z1==z2 - // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43693,65 +29233,36 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt 
!= 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem) - // cond: z1==z2 - // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43762,66 +29273,32 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + x := z1.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } - return false -} -func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) // cond: z1==z2 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem) - // cond: z1==z2 - // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) for { off := v.AuxInt sym := v.Aux @@ -43832,25 +29309,28 @@ func 
rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + x := z1.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETAEstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETEQstore [off] {sym} ptr x mem) @@ -44007,6 +29487,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { @@ -45038,101 +30523,59 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { b := v.Block // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) // result: (SETB (BTL x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) - // result: (SETB (BTL x y)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) // result: (SETB (BTQ x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) - // result: (SETB (BTQ x y)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != 
OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SETNE (TESTLconst [c] x)) // cond: isUint32PowerOfTwo(c) @@ -45177,51 +30620,30 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // match: (SETNE (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) // result: (SETB (BTQconst [log2(c)] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst { - break - } - c := v_0_0.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTQ x (MOVQconst [c]))) - // cond: isUint64PowerOfTwo(c) - // result: (SETB (BTQconst [log2(c)] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_0_1.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) // result: (SETEQ (CMPLconst [0] s)) @@ -45259,339 +30681,193 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { - b := v.Block // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) // cond: z1==z2 // result: (SETB (BTQconst [63] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) - // cond: z1==z2 - // result: (SETB (BTQconst [63] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 
63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) // cond: z1==z2 // result: (SETB (BTQconst [31] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) - // cond: z1==z2 - // result: (SETB (BTQconst [31] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) // cond: z1==z2 // result: (SETB (BTQconst [0] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) - // cond: z1==z2 - // result: (SETB (BTQconst [0] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 
- break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { + b := v.Block // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) // cond: z1==z2 // result: (SETB (BTLconst [0] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) - // cond: z1==z2 - // result: (SETB (BTLconst [0] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2)) // cond: z1==z2 // result: (SETB (BTQconst [63] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] x))) - // cond: z1==z2 - // result: (SETB (BTQconst [63] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - return true + break } - return false -} -func rewriteValueAMD64_OpAMD64SETNE_20(v *Value) bool { - b := v.Block // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2)) // cond: z1==z2 // result: (SETB (BTLconst [31] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != 
OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (SETNE (TESTL z2 z1:(SHRLconst [31] x))) - // cond: z1==z2 - // result: (SETB (BTLconst [31] x)) for { v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (SETNE (InvertFlags x)) // result: (SETNE x) @@ -45666,38 +30942,6 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { b := v.Block // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) // result: (SETBstore [off] {sym} ptr (BTL x y) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLL { - break - } - x := v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) - // result: (SETBstore [off] {sym} ptr (BTL x y) mem) for { off := v.AuxInt sym := v.Aux @@ -45708,61 +30952,32 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpAMD64SHLL { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { + continue + } + y := v_1.Args[1^_i0] + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64MOVLconst || v_1_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64SHLQ { - break - } - x := v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { - break - 
} - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) - // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) for { off := v.AuxInt sym := v.Aux @@ -45773,26 +30988,29 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpAMD64SHLQ { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { + continue + } + y := v_1.Args[1^_i0] + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64MOVQconst || v_1_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) // cond: isUint32PowerOfTwo(c) @@ -45853,38 +31071,6 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) // cond: isUint64PowerOfTwo(c) // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVQconst { - break - } - c := v_1_0.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) - // cond: isUint64PowerOfTwo(c) - // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) for { off := v.AuxInt sym := v.Aux @@ -45895,25 +31081,28 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { break } _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpAMD64MOVQconst { + continue + } + c := v_1_0.AuxInt + x := v_1.Args[1^_i0] + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - c := v_1_1.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem) @@ -45967,49 +31156,9 @@ func 
rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { - b := v.Block // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem) - // cond: z1==z2 - // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) for { off := v.AuxInt sym := v.Aux @@ -46020,69 +31169,36 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem) - // cond: z1==z2 - // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) for { off := v.AuxInt sym := v.Aux @@ -46093,69 +31209,36 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + continue 
+ } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem) - // cond: z1==z2 - // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) for { off := v.AuxInt sym := v.Aux @@ -46166,69 +31249,41 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - 
v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem) - // cond: z1==z2 - // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) for { off := v.AuxInt sym := v.Aux @@ -46239,65 +31294,36 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTQ { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem) - // cond: z1==z2 - // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) for { off := v.AuxInt sym := v.Aux @@ -46308,66 +31334,32 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + x := z1.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } - return false -} -func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) // cond: z1==z2 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - ptr := 
v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64TESTL { - break - } - z2 := v_1.Args[1] - z1 := v_1.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem) - // cond: z1==z2 - // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) for { off := v.AuxInt sym := v.Aux @@ -46378,25 +31370,28 @@ func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { break } _ = v_1.Args[1] - z2 := v_1.Args[0] - z1 := v_1.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_1.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + x := z1.Args[0] + z2 := v_1.Args[1^_i0] + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - v.reset(OpAMD64SETBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem) // result: (SETNEstore [off] {sym} ptr x mem) @@ -46553,6 +31548,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { @@ -48515,85 +33515,51 @@ func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { b := v.Block // match: (TESTB (MOVLconst [c]) x) // result: (TESTBconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64TESTBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TESTB x (MOVLconst [c])) - // result: (TESTBconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + v.reset(OpAMD64TESTBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64TESTBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2) // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) - for { - l2 := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVBload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem)) - // cond: l == 
l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) for { _ = v.Args[1] - l2 := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVBload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := v.Args[_i0] + if l.Op != OpAMD64MOVBload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + l2 := v.Args[1^_i0] + if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = makeValAndOff(0, off) + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true + break } return false } @@ -48620,85 +33586,51 @@ func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { b := v.Block // match: (TESTL (MOVLconst [c]) x) // result: (TESTLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64TESTLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TESTL x (MOVLconst [c])) - // result: (TESTLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + v.reset(OpAMD64TESTLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64TESTLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2) // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) - for { - l2 := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem)) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) for { _ = v.Args[1] - l2 := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := v.Args[_i0] + if l.Op != OpAMD64MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + l2 := v.Args[1^_i0] + if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = makeValAndOff(0, off) + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = 
l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true + break } return false } @@ -48726,92 +33658,54 @@ func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { // match: (TESTQ (MOVQconst [c]) x) // cond: is32Bit(c) // result: (TESTQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64TESTQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TESTQ x (MOVQconst [c])) - // cond: is32Bit(c) - // result: (TESTQconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64TESTQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64TESTQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2) // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) - for { - l2 := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVQload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem)) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) for { _ = v.Args[1] - l2 := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVQload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := v.Args[_i0] + if l.Op != OpAMD64MOVQload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + l2 := v.Args[1^_i0] + if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = makeValAndOff(0, off) + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true + break } return false } @@ -48838,85 +33732,51 @@ func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { b := v.Block // match: (TESTW (MOVLconst [c]) x) // result: (TESTWconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64TESTWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TESTW x (MOVLconst [c])) - // result: (TESTWconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 
:= 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + v.reset(OpAMD64TESTWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64TESTWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2) // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) - for { - l2 := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVWload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem)) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) for { _ = v.Args[1] - l2 := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + l := v.Args[_i0] + if l.Op != OpAMD64MOVWload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + l2 := v.Args[1^_i0] + if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = makeValAndOff(0, off) + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { - break - } - b = l.Block - v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true + break } return false } @@ -49106,154 +33966,92 @@ func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // match: (XORL (SHLL (MOVLconst [1]) y) x) // result: (BTCL x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLL { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTCL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XORL x (SHLL (MOVLconst [1]) y)) - // result: (BTCL x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpAMD64BTCL) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVLconst || v_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTCL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XORL (MOVLconst [c]) x) // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 // result: (BTCLconst [log2uint32(c)] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { - 
break - } - v.reset(OpAMD64BTCLconst) - v.AuxInt = log2uint32(c) - v.AddArg(x) - return true - } - // match: (XORL x (MOVLconst [c])) - // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTCLconst [log2uint32(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { + continue + } + v.reset(OpAMD64BTCLconst) + v.AuxInt = log2uint32(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTCLconst) - v.AuxInt = log2uint32(c) - v.AddArg(x) - return true + break } // match: (XORL x (MOVLconst [c])) // result: (XORLconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := v_1.AuxInt + v.reset(OpAMD64XORLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpAMD64XORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (MOVLconst [c]) x) - // result: (XORLconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64XORLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) // cond: d==32-c // result: (ROLLconst x [c]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRLconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpAMD64ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL (SHLLconst x [c]) (SHRWconst x [d])) // cond: d==16-c && c < 16 && t.Size() == 2 @@ -49261,102 +34059,55 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { + continue + } + v.reset(OpAMD64ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRWconst { - break - } - d := v_1.AuxInt - if x != 
v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (XORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { // match: (XORL (SHLLconst x [c]) (SHRBconst x [d])) // cond: d==8-c && c < 8 && t.Size() == 1 // result: (ROLBconst x [c]) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLLconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRBconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { + continue + } + v.reset(OpAMD64ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORL x x) // result: (MOVLconst [0]) @@ -49374,49 +34125,28 @@ func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVLload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVLload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64XORLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64XORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (XORLload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64XORLload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -49834,162 
+34564,96 @@ func rewriteValueAMD64_OpAMD64XORLmodify_0(v *Value) bool { func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // match: (XORQ (SHLQ (MOVQconst [1]) y) x) // result: (BTCQ x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQ { - break - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTCQ) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XORQ x (SHLQ (MOVQconst [1]) y)) - // result: (BTCQ x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQ { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpAMD64BTCQ) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVQconst || v_1_0.AuxInt != 1 { - break - } - v.reset(OpAMD64BTCQ) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XORQ (MOVQconst [c]) x) // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 // result: (BTCQconst [log2(c)] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTCQconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (XORQ x (MOVQconst [c])) - // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTCQconst [log2(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { + continue + } + v.reset(OpAMD64BTCQconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { - break - } - v.reset(OpAMD64BTCQconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break } // match: (XORQ x (MOVQconst [c])) // cond: is32Bit(c) // result: (XORQconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64XORQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64XORQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (XORQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64XORQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) // cond: d==64-c // result: (ROLQconst x [c]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAMD64SHLQconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAMD64SHRQconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpAMD64ROLQconst) + v.AuxInt = c + 
v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORQ x x) // result: (MOVQconst [0]) @@ -50007,52 +34671,28 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // result: (XORQload x [off] {sym} ptr mem) for { _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVQload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpAMD64MOVQload { + continue + } + off := l.AuxInt + sym := l.Aux + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64XORQload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64XORQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool { - // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (XORQload x [off] {sym} ptr mem) - for { - x := v.Args[1] - l := v.Args[0] - if l.Op != OpAMD64MOVQload { - break - } - off := l.AuxInt - sym := l.Aux - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64XORQload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -58029,89 +42669,53 @@ func rewriteBlockAMD64(b *Block) bool { case BlockAMD64EQ: // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) // result: (UGE (BTL x y)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true - } - // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) - // result: (UGE (BTL x y)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 { - break - 
} - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) // result: (UGE (BTQ x y)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true - } - // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) - // result: (UGE (BTQ x y)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (TESTLconst [c] x)) // cond: isUint32PowerOfTwo(c) @@ -58150,335 +42754,193 @@ func rewriteBlockAMD64(b *Block) bool { // match: (EQ (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) // result: (UGE (BTQconst [log2(c)] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst { - break - } - c := v_0_0.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTQ x (MOVQconst [c]))) - // cond: isUint64PowerOfTwo(c) - // result: (UGE (BTQconst [log2(c)] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(isUint64PowerOfTwo(c)) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + b.AddControl(v0) + return true } - c := v_0_1.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) // cond: z1==z2 // result: (UGE (BTQconst [63] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, 
OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) - // cond: z1==z2 - // result: (UGE (BTQconst [63] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) // cond: z1==z2 // result: (UGE (BTQconst [31] x)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) - // cond: z1==z2 - // result: (UGE (BTQconst [31] x)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) // cond: z1==z2 // result: (UGE (BTQconst [0] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) - // cond: z1==z2 - // result: (UGE (BTQconst [0] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = 
v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) // cond: z1==z2 // result: (UGE (BTLconst [0] x)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) - // cond: z1==z2 - // result: (UGE (BTLconst [0] x)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2)) // cond: z1==z2 // result: (UGE (BTQconst [63] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTQ z2 z1:(SHRQconst [63] x))) - // cond: z1==z2 - // result: (UGE (BTQconst [63] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + 
v0.AddArg(x) + b.AddControl(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (TESTL z1:(SHRLconst [31] x) z2)) // cond: z1==z2 // result: (UGE (BTLconst [31] x)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (EQ (TESTL z2 z1:(SHRLconst [31] x))) - // cond: z1==z2 - // result: (UGE (BTLconst [31] x)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64UGE) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + b.AddControl(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64UGE) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (EQ (InvertFlags cmp) yes no) // result: (EQ cmp yes no) @@ -59041,89 +43503,53 @@ func rewriteBlockAMD64(b *Block) bool { } // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) // result: (ULT (BTL x y)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLL { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true - } - // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) - // result: (ULT (BTL x y)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) // result: (ULT (BTQ x y)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64SHLQ { - break - } - x := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, 
types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true - } - // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) - // result: (ULT (BTQ x y)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { + continue + } + y := v_0.Args[1^_i0] + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - x := v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (TESTLconst [c] x)) // cond: isUint32PowerOfTwo(c) @@ -59162,335 +43588,193 @@ func rewriteBlockAMD64(b *Block) bool { // match: (NE (TESTQ (MOVQconst [c]) x)) // cond: isUint64PowerOfTwo(c) // result: (ULT (BTQconst [log2(c)] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst { - break - } - c := v_0_0.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTQ x (MOVQconst [c]))) - // cond: isUint64PowerOfTwo(c) - // result: (ULT (BTQconst [log2(c)] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64MOVQconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(isUint64PowerOfTwo(c)) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + b.AddControl(v0) + return true } - c := v_0_1.AuxInt - if !(isUint64PowerOfTwo(c)) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) // cond: z1==z2 // result: (ULT (BTQconst [63] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) - // cond: z1==z2 - // result: (ULT (BTQconst [63] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || 
z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) // cond: z1==z2 // result: (ULT (BTQconst [31] x)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) - // cond: z1==z2 - // result: (ULT (BTQconst [31] x)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) // cond: z1==z2 // result: (ULT (BTQconst [0] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) - // cond: z1==z2 - // result: (ULT (BTQconst [0] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if 
z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) // cond: z1==z2 // result: (ULT (BTLconst [0] x)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) - // cond: z1==z2 - // result: (ULT (BTLconst [0] x)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(x) + b.AddControl(v0) + return true } - z1_0 := z1.Args[0] - if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { - break - } - x := z1_0.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTQ z1:(SHRQconst [63] x) z2)) // cond: z1==z2 // result: (ULT (BTQconst [63] x)) - for b.Controls[0].Op == OpAMD64TESTQ { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTQ z2 z1:(SHRQconst [63] x))) - // cond: z1==z2 - // result: (ULT (BTQconst [63] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 63 + v0.AddArg(x) + b.AddControl(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) - v0.AuxInt = 63 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTL z1:(SHRLconst [31] x) z2)) // cond: z1==z2 // result: (ULT (BTLconst [31] x)) - for b.Controls[0].Op == OpAMD64TESTL { - v_0 := b.Controls[0] - z2 := v_0.Args[1] - z1 := v_0.Args[0] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break - } - x := z1.Args[0] - if !(z1 == z2) { - break - } - 
b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true - } - // match: (NE (TESTL z2 z1:(SHRLconst [31] x))) - // cond: z1==z2 - // result: (ULT (BTLconst [31] x)) for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - z2 := v_0.Args[0] - z1 := v_0.Args[1] - if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + z1 := v_0.Args[_i0] + if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { + continue + } + x := z1.Args[0] + z2 := v_0.Args[1^_i0] + if !(z1 == z2) { + continue + } + b.Reset(BlockAMD64ULT) + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = 31 + v0.AddArg(x) + b.AddControl(v0) + return true } - x := z1.Args[0] - if !(z1 == z2) { - break - } - b.Reset(BlockAMD64ULT) - v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) - v0.AuxInt = 31 - v0.AddArg(x) - b.AddControl(v0) - return true + break } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) // result: (UGT cmp yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 7bd34106fc..b392bad1b1 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -9,7 +9,7 @@ import "cmd/compile/internal/types" func rewriteValueARM(v *Value) bool { switch v.Op { case OpARMADC: - return rewriteValueARM_OpARMADC_0(v) || rewriteValueARM_OpARMADC_10(v) + return rewriteValueARM_OpARMADC_0(v) case OpARMADCconst: return rewriteValueARM_OpARMADCconst_0(v) case OpARMADCshiftLL: @@ -25,13 +25,13 @@ func rewriteValueARM(v *Value) bool { case OpARMADCshiftRLreg: return rewriteValueARM_OpARMADCshiftRLreg_0(v) case OpARMADD: - return rewriteValueARM_OpARMADD_0(v) || rewriteValueARM_OpARMADD_10(v) + return rewriteValueARM_OpARMADD_0(v) case OpARMADDD: return rewriteValueARM_OpARMADDD_0(v) case OpARMADDF: return rewriteValueARM_OpARMADDF_0(v) case OpARMADDS: - return rewriteValueARM_OpARMADDS_0(v) || rewriteValueARM_OpARMADDS_10(v) + return rewriteValueARM_OpARMADDS_0(v) case OpARMADDSshiftLL: return rewriteValueARM_OpARMADDSshiftLL_0(v) case OpARMADDSshiftLLreg: @@ -59,7 +59,7 @@ func rewriteValueARM(v *Value) bool { case OpARMADDshiftRLreg: return rewriteValueARM_OpARMADDshiftRLreg_0(v) case OpARMAND: - return rewriteValueARM_OpARMAND_0(v) || rewriteValueARM_OpARMAND_10(v) || rewriteValueARM_OpARMAND_20(v) + return rewriteValueARM_OpARMAND_0(v) || rewriteValueARM_OpARMAND_10(v) case OpARMANDconst: return rewriteValueARM_OpARMANDconst_0(v) case OpARMANDshiftLL: @@ -95,7 +95,7 @@ func rewriteValueARM(v *Value) bool { case OpARMBICshiftRLreg: return rewriteValueARM_OpARMBICshiftRLreg_0(v) case OpARMCMN: - return rewriteValueARM_OpARMCMN_0(v) || rewriteValueARM_OpARMCMN_10(v) + return rewriteValueARM_OpARMCMN_0(v) case OpARMCMNconst: return rewriteValueARM_OpARMCMNconst_0(v) case OpARMCMNshiftLL: @@ -215,7 +215,7 @@ func rewriteValueARM(v *Value) bool { case OpARMMOVWstoreshiftRL: return rewriteValueARM_OpARMMOVWstoreshiftRL_0(v) case OpARMMUL: - return rewriteValueARM_OpARMMUL_0(v) || rewriteValueARM_OpARMMUL_10(v) || rewriteValueARM_OpARMMUL_20(v) + return rewriteValueARM_OpARMMUL_0(v) || rewriteValueARM_OpARMMUL_10(v) case OpARMMULA: return rewriteValueARM_OpARMMULA_0(v) || rewriteValueARM_OpARMMULA_10(v) || rewriteValueARM_OpARMMULA_20(v) case OpARMMULD: @@ -249,7 +249,7 @@ func rewriteValueARM(v *Value) bool { case OpARMNotEqual: return 
rewriteValueARM_OpARMNotEqual_0(v) case OpARMOR: - return rewriteValueARM_OpARMOR_0(v) || rewriteValueARM_OpARMOR_10(v) + return rewriteValueARM_OpARMOR_0(v) case OpARMORconst: return rewriteValueARM_OpARMORconst_0(v) case OpARMORshiftLL: @@ -371,7 +371,7 @@ func rewriteValueARM(v *Value) bool { case OpARMSUBshiftRLreg: return rewriteValueARM_OpARMSUBshiftRLreg_0(v) case OpARMTEQ: - return rewriteValueARM_OpARMTEQ_0(v) || rewriteValueARM_OpARMTEQ_10(v) + return rewriteValueARM_OpARMTEQ_0(v) case OpARMTEQconst: return rewriteValueARM_OpARMTEQconst_0(v) case OpARMTEQshiftLL: @@ -387,7 +387,7 @@ func rewriteValueARM(v *Value) bool { case OpARMTEQshiftRLreg: return rewriteValueARM_OpARMTEQshiftRLreg_0(v) case OpARMTST: - return rewriteValueARM_OpARMTST_0(v) || rewriteValueARM_OpARMTST_10(v) + return rewriteValueARM_OpARMTST_0(v) case OpARMTSTconst: return rewriteValueARM_OpARMTSTconst_0(v) case OpARMTSTshiftLL: @@ -403,7 +403,7 @@ func rewriteValueARM(v *Value) bool { case OpARMTSTshiftRLreg: return rewriteValueARM_OpARMTSTshiftRLreg_0(v) case OpARMXOR: - return rewriteValueARM_OpARMXOR_0(v) || rewriteValueARM_OpARMXOR_10(v) + return rewriteValueARM_OpARMXOR_0(v) case OpARMXORconst: return rewriteValueARM_OpARMXORconst_0(v) case OpARMXORshiftLL: @@ -844,252 +844,146 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool { // result: (ADCconst [c] x flags) for { flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMMOVWconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + v.reset(OpARMADCconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) + return true } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARMADCconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) - return true - } - // match: (ADC x (MOVWconst [c]) flags) - // result: (ADCconst [c] x flags) - for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpARMADCconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) - return true + break } // match: (ADC x (SLLconst [c] y) flags) // result: (ADCshiftLL x y [c] flags) for { flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADCshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + v.AddArg(flags) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADCshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } - // match: (ADC (SLLconst [c] y) x flags) - // result: (ADCshiftLL x y [c] flags) - for { - flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMADCshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true + break } // match: (ADC x (SRLconst [c] y) flags) // result: (ADCshiftRL x y [c] flags) for { flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADCshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + v.AddArg(flags) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADCshiftRL) - v.AuxInt = c 
- v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } - // match: (ADC (SRLconst [c] y) x flags) - // result: (ADCshiftRL x y [c] flags) - for { - flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMADCshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true + break } // match: (ADC x (SRAconst [c] y) flags) // result: (ADCshiftRA x y [c] flags) for { flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADCshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + v.AddArg(flags) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADCshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } - // match: (ADC (SRAconst [c] y) x flags) - // result: (ADCshiftRA x y [c] flags) - for { - flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMADCshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true + break } // match: (ADC x (SLL y z) flags) // result: (ADCshiftLLreg x y z flags) for { flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADCshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + v.AddArg(flags) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true + break } - // match: (ADC (SLL y z) x flags) - // result: (ADCshiftLLreg x y z flags) - for { - flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMADCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } - return false -} -func rewriteValueARM_OpARMADC_10(v *Value) bool { // match: (ADC x (SRL y z) flags) // result: (ADCshiftRLreg x y z flags) for { flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADCshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + v.AddArg(flags) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } - // match: (ADC (SRL y z) x flags) - // result: (ADCshiftRLreg x y z flags) - for { - flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMADCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true + break } // match: (ADC x (SRA y z) flags) // result: (ADCshiftRAreg x y z flags) for { flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + 
v.reset(OpARMADCshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + v.AddArg(flags) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } - // match: (ADC (SRA y z) x flags) - // result: (ADCshiftRAreg x y z flags) - for { - flags := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMADCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true + break } return false } @@ -1380,348 +1274,210 @@ func rewriteValueARM_OpARMADCshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADD_0(v *Value) bool { + b := v.Block // match: (ADD x (MOVWconst [c])) // result: (ADDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMADDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMADDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (MOVWconst [c]) x) - // result: (ADDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMADDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD x (SLLconst [c] y)) // result: (ADDshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADDshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADDshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (SLLconst [c] y) x) - // result: (ADDshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMADDshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD x (SRLconst [c] y)) // result: (ADDshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADDshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADDshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (SRLconst [c] y) x) - // result: (ADDshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMADDshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD x (SRAconst [c] y)) // result: (ADDshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADDshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := 
v_1.Args[0] - v.reset(OpARMADDshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (SRAconst [c] y) x) - // result: (ADDshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMADDshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD x (SLL y z)) // result: (ADDshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } - // match: (ADD (SLL y z) x) - // result: (ADDshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMADDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMADD_10(v *Value) bool { - b := v.Block // match: (ADD x (SRL y z)) // result: (ADDshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (ADD (SRL y z) x) - // result: (ADDshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMADDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (ADD x (SRA y z)) // result: (ADDshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (ADD (SRA y z) x) - // result: (ADDshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMADDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (ADD x (RSBconst [0] y)) // result: (SUB x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { + continue + } + y := v_1.Args[0] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (RSBconst [0] y) x) - // result: (SUB x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMRSBconst || v_0.AuxInt != 0 { - break - } - y := v_0.Args[0] - 
v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD (RSBconst [c] x) (RSBconst [d] y)) // result: (RSBconst [c+d] (ADD x y)) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMRSBconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMRSBconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMRSBconst { + continue + } + d := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = c + d + v0 := b.NewValue0(v.Pos, OpARMADD, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMRSBconst { - break - } - d := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = c + d - v0 := b.NewValue0(v.Pos, OpARMADD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (ADD (RSBconst [d] y) (RSBconst [c] x)) - // result: (RSBconst [c+d] (ADD x y)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMRSBconst { - break - } - d := v_0.AuxInt - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMRSBconst { - break - } - c := v_1.AuxInt - x := v_1.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = c + d - v0 := b.NewValue0(v.Pos, OpARMADD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (ADD (MUL x y) a) // result: (MULA x y a) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMUL { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpARMMULA) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) - return true - } - // match: (ADD a (MUL x y)) - // result: (MULA x y a) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMUL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMMUL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v.Args[1^_i0] + v.reset(OpARMMULA) + v.AddArg(x) + v.AddArg(y) + v.AddArg(a) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpARMMULA) - v.AddArg(x) - v.AddArg(y) - v.AddArg(a) - return true + break } return false } @@ -1731,82 +1487,48 @@ func rewriteValueARM_OpARMADDD_0(v *Value) bool { // result: (MULAD a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMULD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + continue + } + v.reset(OpARMMULAD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULAD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDD (MULD x y) a) - // cond: a.Uses == 1 && objabi.GOARM >= 6 - // result: (MULAD a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMULD { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULAD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDD a (NMULD x y)) // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULSD a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMNMULD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMNMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if 
!(a.Uses == 1 && objabi.GOARM >= 6) { + continue + } + v.reset(OpARMMULSD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULSD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDD (NMULD x y) a) - // cond: a.Uses == 1 && objabi.GOARM >= 6 - // result: (MULSD a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMNMULD { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULSD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -1816,82 +1538,48 @@ func rewriteValueARM_OpARMADDF_0(v *Value) bool { // result: (MULAF a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMULF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMULF { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + continue + } + v.reset(OpARMMULAF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULAF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDF (MULF x y) a) - // cond: a.Uses == 1 && objabi.GOARM >= 6 - // result: (MULAF a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMULF { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULAF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDF a (NMULF x y)) // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULSF a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMNMULF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMNMULF { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + continue + } + v.reset(OpARMMULSF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULSF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDF (NMULF x y) a) - // cond: a.Uses == 1 && objabi.GOARM >= 6 - // result: (MULSF a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMNMULF { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { - break - } - v.reset(OpARMMULSF) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -1900,231 +1588,139 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // result: (ADDSconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMADDSconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMADDSconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDS (MOVWconst [c]) x) - // result: (ADDSconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMADDSconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDS x (SLLconst [c] y)) 
// result: (ADDSshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADDSshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADDSshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDS (SLLconst [c] y) x) - // result: (ADDSshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMADDSshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDS x (SRLconst [c] y)) // result: (ADDSshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADDSshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADDSshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDS (SRLconst [c] y) x) - // result: (ADDSshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMADDSshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDS x (SRAconst [c] y)) // result: (ADDSshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMADDSshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMADDSshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDS (SRAconst [c] y) x) - // result: (ADDSshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMADDSshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDS x (SLL y z)) // result: (ADDSshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDSshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADDSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } - // match: (ADDS (SLL y z) x) - // result: (ADDSshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMADDSshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMADDS_10(v *Value) bool { // match: (ADDS x (SRL y z)) // result: (ADDSshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := 
v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDSshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADDSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (ADDS (SRL y z) x) - // result: (ADDSshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMADDSshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (ADDS x (SRA y z)) // result: (ADDSshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDSshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMADDSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (ADDS (SRA y z) x) - // result: (ADDSshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMADDSshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } @@ -2777,231 +2373,139 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // result: (ANDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMANDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVWconst [c]) x) - // result: (ANDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMANDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (AND x (SLLconst [c] y)) // result: (ANDshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMANDshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMANDshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (SLLconst [c] y) x) - // result: (ANDshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMANDshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND x (SRLconst [c] y)) // result: (ANDshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMANDshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMANDshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (SRLconst [c] 
y) x) - // result: (ANDshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMANDshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND x (SRAconst [c] y)) // result: (ANDshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMANDshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMANDshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (SRAconst [c] y) x) - // result: (ANDshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMANDshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND x (SLL y z)) // result: (ANDshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMANDshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMANDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } - // match: (AND (SLL y z) x) - // result: (ANDshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMANDshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMAND_10(v *Value) bool { // match: (AND x (SRL y z)) // result: (ANDshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMANDshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMANDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (AND (SRL y z) x) - // result: (ANDshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMANDshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (AND x (SRA y z)) // result: (ANDshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMANDshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMANDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (AND (SRA y z) x) - // result: (ANDshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMANDshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break 
} // match: (AND x x) // result: x @@ -3019,132 +2523,82 @@ func rewriteValueARM_OpARMAND_10(v *Value) bool { // result: (BIC x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMVN { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMVN { + continue + } + y := v_1.Args[0] + v.reset(OpARMBIC) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARMBIC) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (MVN y) x) - // result: (BIC x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMVN { - break - } - y := v_0.Args[0] - v.reset(OpARMBIC) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND x (MVNshiftLL y [c])) // result: (BICshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMVNshiftLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMVNshiftLL { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMBICshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMBICshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (MVNshiftLL y [c]) x) - // result: (BICshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMVNshiftLL { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMBICshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } + return false +} +func rewriteValueARM_OpARMAND_10(v *Value) bool { // match: (AND x (MVNshiftRL y [c])) // result: (BICshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMVNshiftRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMVNshiftRL { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMBICshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMBICshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM_OpARMAND_20(v *Value) bool { - // match: (AND (MVNshiftRL y [c]) x) - // result: (BICshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMVNshiftRL { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMBICshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND x (MVNshiftRA y [c])) // result: (BICshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMVNshiftRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMVNshiftRA { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMBICshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMBICshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (MVNshiftRA y [c]) x) - // result: (BICshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMVNshiftRA { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMBICshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -3947,260 +3401,157 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // result: (CMNconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMCMNconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (CMN (MOVWconst [c]) x) - // result: (CMNconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMCMNconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (CMN x (SLLconst [c] y)) // result: (CMNshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMCMNshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (CMN (SLLconst [c] y) x) - // result: (CMNshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMCMNshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (CMN x (SRLconst [c] y)) // result: (CMNshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMCMNshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (CMN (SRLconst [c] y) x) - // result: (CMNshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMCMNshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (CMN x (SRAconst [c] y)) // result: (CMNshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMCMNshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (CMN (SRAconst [c] y) x) - // result: (CMNshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMCMNshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (CMN x (SLL y z)) // result: (CMNshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMNshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMCMNshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return 
true + break } - // match: (CMN (SLL y z) x) - // result: (CMNshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMCMNshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMCMN_10(v *Value) bool { // match: (CMN x (SRL y z)) // result: (CMNshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMNshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMCMNshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (CMN (SRL y z) x) - // result: (CMNshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMCMNshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (CMN x (SRA y z)) // result: (CMNshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMNshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMCMNshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (CMN (SRA y z) x) - // result: (CMNshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMCMNshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (CMN x (RSBconst [0] y)) // result: (CMP x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { + continue + } + y := v_1.Args[0] + v.reset(OpARMCMP) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARMCMP) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (CMN (RSBconst [0] y) x) - // result: (CMP x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMRSBconst || v_0.AuxInt != 0 { - break - } - y := v_0.Args[0] - v.reset(OpARMCMP) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -8671,431 +8022,256 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMMUL_0(v *Value) bool { + b := v.Block // match: (MUL x (MOVWconst [c])) // cond: int32(c) == -1 // result: (RSBconst [0] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == -1) { + continue + } + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpARMRSBconst) - v.AuxInt = 0 - v.AddArg(x) - return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: int32(c) == -1 - // result: (RSBconst [0] x) - for { - x := 
v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpARMRSBconst) - v.AuxInt = 0 - v.AddArg(x) - return true + break } // match: (MUL _ (MOVWconst [0])) // result: (MOVWconst [0]) for { _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 0 { + continue + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } - // match: (MUL (MOVWconst [0]) _) - // result: (MOVWconst [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst || v_0.AuxInt != 0 { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true + break } // match: (MUL x (MOVWconst [1])) // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MUL (MOVWconst [1]) x) - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (MUL x (MOVWconst [c])) // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break } // match: (MUL x (MOVWconst [c])) // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (ADDshiftLL x x [log2(c-1)]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + continue + } + v.reset(OpARMADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break - } - v.reset(OpARMADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true + break } - // match: (MUL (MOVWconst [c]) x) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (RSBshiftLL x x [log2(c+1)]) for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != 
OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + continue + } + v.reset(OpARMRSBshiftLL) + v.AuxInt = log2(c + 1) + v.AddArg(x) + v.AddArg(x) + return true } - c := v_0.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - v.reset(OpARMADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break } return false } func rewriteValueARM_OpARMMUL_10(v *Value) bool { - b := v.Block - // match: (MUL x (MOVWconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (RSBshiftLL x x [log2(c+1)]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARMRSBshiftLL) - v.AuxInt = log2(c + 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (RSBshiftLL x x [log2(c+1)]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARMRSBshiftLL) - v.AuxInt = log2(c + 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // 
match: (MUL x (MOVWconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL x (MOVWconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL x (MOVWconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL x (MOVWconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - 
return true - } - // match: (MUL (MOVWconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM_OpARMMUL_20(v *Value) bool { // match: (MUL (MOVWconst [c]) (MOVWconst [d])) // result: (MOVWconst [int64(int32(c*d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMMOVWconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + d := v_1.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(c * d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - d := v_1.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(c * d)) - return true - } - // match: (MUL (MOVWconst [d]) (MOVWconst [c])) - // result: (MOVWconst [int64(int32(c*d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(c * d)) - return true + break } return false } @@ -9569,39 +8745,24 @@ func rewriteValueARM_OpARMMULD_0(v *Value) bool { // match: (MULD (NEGD x) y) // cond: objabi.GOARM >= 6 // result: (NMULD x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMNEGD { - break - } - x := v_0.Args[0] - if !(objabi.GOARM >= 6) { - break - } - v.reset(OpARMNMULD) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MULD y (NEGD x)) - // cond: objabi.GOARM >= 6 - // result: (NMULD x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMNEGD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMNEGD { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(objabi.GOARM >= 6) { + continue + } + v.reset(OpARMNMULD) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - if !(objabi.GOARM >= 6) { - break - } - v.reset(OpARMNMULD) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -9609,39 +8770,24 @@ func rewriteValueARM_OpARMMULF_0(v *Value) bool { // match: (MULF (NEGF x) y) // cond: objabi.GOARM >= 6 // result: (NMULF x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMNEGF { - break - } - x := v_0.Args[0] - if !(objabi.GOARM >= 6) { - break - } - v.reset(OpARMNMULF) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MULF y (NEGF x)) - // cond: objabi.GOARM >= 6 - // result: (NMULF x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMNEGF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMNEGF { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + if !(objabi.GOARM >= 6) { + continue + } + v.reset(OpARMNMULF) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - if !(objabi.GOARM >= 6) { - break - } - v.reset(OpARMNMULF) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -10357,64 +9503,42 @@ func rewriteValueARM_OpARMNEGF_0(v *Value) bool { func 
rewriteValueARM_OpARMNMULD_0(v *Value) bool { // match: (NMULD (NEGD x) y) // result: (MULD x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMNEGD { - break - } - x := v_0.Args[0] - v.reset(OpARMMULD) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (NMULD y (NEGD x)) - // result: (MULD x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMNEGD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMNEGD { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARMMULD) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARMMULD) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } func rewriteValueARM_OpARMNMULF_0(v *Value) bool { // match: (NMULF (NEGF x) y) // result: (MULF x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMNEGF { - break - } - x := v_0.Args[0] - v.reset(OpARMMULF) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (NMULF y (NEGF x)) - // result: (MULF x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMNEGF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARMNEGF { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARMMULF) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARMMULF) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -10493,231 +9617,139 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // result: (ORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVWconst [c]) x) - // result: (ORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (OR x (SLLconst [c] y)) // result: (ORshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMORshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (SLLconst [c] y) x) - // result: (ORshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMORshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (OR x (SRLconst [c] y)) // result: (ORshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMORshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMORshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR 
(SRLconst [c] y) x) - // result: (ORshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMORshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (OR x (SRAconst [c] y)) // result: (ORshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (SRAconst [c] y) x) - // result: (ORshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (OR x (SLL y z)) // result: (ORshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMORshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } - // match: (OR (SLL y z) x) - // result: (ORshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMOR_10(v *Value) bool { // match: (OR x (SRL y z)) // result: (ORshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMORshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (OR (SRL y z) x) - // result: (ORshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (OR x (SRA y z)) // result: (ORshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMORshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (OR (SRA y z) x) - // result: (ORshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (OR 
x x) // result: x @@ -10825,7 +9857,7 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftLL [c] (SRLconst x [32-c]) x) + // match: ( ORshiftLL [c] (SRLconst x [32-c]) x) // result: (SRRconst [32-c] x) for { c := v.AuxInt @@ -11068,7 +10100,7 @@ func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftRL [c] (SLLconst x [32-c]) x) + // match: ( ORshiftRL [c] (SLLconst x [32-c]) x) // result: (SRRconst [ c] x) for { c := v.AuxInt @@ -14244,231 +13276,139 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // result: (TEQconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMTEQconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMTEQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TEQ (MOVWconst [c]) x) - // result: (TEQconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMTEQconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TEQ x (SLLconst [c] y)) // result: (TEQshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTEQshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMTEQshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (TEQ (SLLconst [c] y) x) - // result: (TEQshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMTEQshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (TEQ x (SRLconst [c] y)) // result: (TEQshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTEQshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMTEQshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (TEQ (SRLconst [c] y) x) - // result: (TEQshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMTEQshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (TEQ x (SRAconst [c] y)) // result: (TEQshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTEQshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMTEQshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (TEQ (SRAconst [c] y) x) - // result: (TEQshiftRA x y [c]) - for { - x := v.Args[1] - 
v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMTEQshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (TEQ x (SLL y z)) // result: (TEQshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTEQshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMTEQshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } - // match: (TEQ (SLL y z) x) - // result: (TEQshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMTEQshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMTEQ_10(v *Value) bool { // match: (TEQ x (SRL y z)) // result: (TEQshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTEQshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMTEQshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (TEQ (SRL y z) x) - // result: (TEQshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMTEQshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (TEQ x (SRA y z)) // result: (TEQshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTEQshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMTEQshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (TEQ (SRA y z) x) - // result: (TEQshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMTEQshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } @@ -14759,231 +13699,139 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // result: (TSTconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMTSTconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TST (MOVWconst [c]) x) - // result: (TSTconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMTSTconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TST x (SLLconst [c] y)) // result: (TSTshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := 
v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMTSTshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (TST (SLLconst [c] y) x) - // result: (TSTshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMTSTshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (TST x (SRLconst [c] y)) // result: (TSTshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMTSTshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (TST (SRLconst [c] y) x) - // result: (TSTshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMTSTshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (TST x (SRAconst [c] y)) // result: (TSTshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMTSTshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (TST (SRAconst [c] y) x) - // result: (TSTshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMTSTshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (TST x (SLL y z)) // result: (TSTshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTSTshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMTSTshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } - // match: (TST (SLL y z) x) - // result: (TSTshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMTSTshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - return false -} -func rewriteValueARM_OpARMTST_10(v *Value) bool { // match: (TST x (SRL y z)) // result: (TSTshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + 
v.reset(OpARMTSTshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMTSTshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (TST (SRL y z) x) - // result: (TSTshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMTSTshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (TST x (SRA y z)) // result: (TSTshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTSTshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMTSTshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (TST (SRA y z) x) - // result: (TSTshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMTSTshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } @@ -15274,264 +14122,159 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // result: (XORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpARMXORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARMXORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVWconst [c]) x) - // result: (XORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpARMXORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR x (SLLconst [c] y)) // result: (XORshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMXORshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (SLLconst [c] y) x) - // result: (XORshiftLL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMXORshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR x (SRLconst [c] y)) // result: (XORshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRLconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMXORshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (SRLconst [c] y) x) - // result: (XORshiftRL x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMXORshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR x (SRAconst [c] y)) // result: (XORshiftRA x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRAconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMXORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (SRAconst [c] y) x) - // result: (XORshiftRA x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMXORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR x (SRRconst [c] y)) // result: (XORshiftRR x y [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRRconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRRconst { + continue + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftRR) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMXORshiftRR) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (XOR (SRRconst [c] y) x) - // result: (XORshiftRR x y [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRRconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - v.reset(OpARMXORshiftRR) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM_OpARMXOR_10(v *Value) bool { // match: (XOR x (SLL y z)) // result: (XORshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMXORshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMXORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (XOR (SLL y z) x) - // result: (XORshiftLLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMXORshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (XOR x (SRL y z)) // result: (XORshiftRLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMXORshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMXORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (XOR (SRL y z) x) - // result: (XORshiftRLreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMXORshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (XOR x (SRA y z)) // result: (XORshiftRAreg x y z) for { _ = 
v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMXORshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpARMXORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (XOR (SRA y z) x) - // result: (XORshiftRAreg x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpARMXORshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } // match: (XOR x x) // result: (MOVWconst [0]) @@ -20696,17 +19439,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMADD { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMEQ) + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMEQ) - v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPconst [0] l:(MULA x y a)) yes no) // cond: l.Uses==1 @@ -20928,17 +19675,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMAND { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMEQ) + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMEQ) - v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) // cond: l.Uses==1 @@ -21132,17 +19883,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMXOR { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMEQ) + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMEQ) - v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPconst [0] l:(XORconst [c] x)) yes no) // cond: l.Uses==1 @@ -21610,17 +20365,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMADD { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMGE) + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMGE) - v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPconst [0] l:(MULA x y a)) yes no) // cond: l.Uses==1 @@ -21842,17 +20601,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMAND { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = 
l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMGE) + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMGE) - v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPconst [0] l:(ANDconst [c] x)) yes no) // cond: l.Uses==1 @@ -22046,17 +20809,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMXOR { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMGE) + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMGE) - v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPconst [0] l:(XORconst [c] x)) yes no) // cond: l.Uses==1 @@ -22525,17 +21292,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMADD { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMGT) + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMGT) - v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPconst [0] l:(ADDconst [c] x)) yes no) // cond: l.Uses==1 @@ -22729,17 +21500,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMAND { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMGT) + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMGT) - v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPconst [0] l:(MULA x y a)) yes no) // cond: l.Uses==1 @@ -22961,17 +21736,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMXOR { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMGT) + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMGT) - v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPconst [0] l:(XORconst [c] x)) yes no) // cond: l.Uses==1 @@ -23541,17 +22320,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMADD { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMLE) + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMLE) - 
v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPconst [0] l:(MULA x y a)) yes no) // cond: l.Uses==1 @@ -23773,17 +22556,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMAND { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMLE) + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMLE) - v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPconst [0] l:(ANDconst [c] x)) yes no) // cond: l.Uses==1 @@ -23977,17 +22764,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMXOR { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMLE) + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMLE) - v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPconst [0] l:(XORconst [c] x)) yes no) // cond: l.Uses==1 @@ -24456,17 +23247,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMADD { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMLT) + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMLT) - v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPconst [0] l:(MULA x y a)) yes no) // cond: l.Uses==1 @@ -24688,17 +23483,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMAND { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMLT) + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMLT) - v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPconst [0] l:(ANDconst [c] x)) yes no) // cond: l.Uses==1 @@ -24892,17 +23691,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMXOR { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMLT) + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMLT) - v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPconst [0] l:(XORconst [c] x)) yes no) // cond: l.Uses==1 @@ -25529,17 +24332,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMADD { 
break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMNE) + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMNE) - v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPconst [0] l:(MULA x y a)) yes no) // cond: l.Uses==1 @@ -25761,17 +24568,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMAND { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMNE) + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMNE) - v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPconst [0] l:(ANDconst [c] x)) yes no) // cond: l.Uses==1 @@ -25965,17 +24776,21 @@ func rewriteBlockARM(b *Block) bool { if l.Op != OpARMXOR { break } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1) { - break + _ = l.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := l.Args[_i0] + y := l.Args[1^_i0] + if !(l.Uses == 1) { + continue + } + b.Reset(BlockARMNE) + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARMNE) - v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPconst [0] l:(XORconst [c] x)) yes no) // cond: l.Uses==1 diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 485aee546a..7fab25a579 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -10,7 +10,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64ADCSflags: return rewriteValueARM64_OpARM64ADCSflags_0(v) case OpARM64ADD: - return rewriteValueARM64_OpARM64ADD_0(v) || rewriteValueARM64_OpARM64ADD_10(v) || rewriteValueARM64_OpARM64ADD_20(v) + return rewriteValueARM64_OpARM64ADD_0(v) || rewriteValueARM64_OpARM64ADD_10(v) case OpARM64ADDconst: return rewriteValueARM64_OpARM64ADDconst_0(v) case OpARM64ADDshiftLL: @@ -20,7 +20,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64ADDshiftRL: return rewriteValueARM64_OpARM64ADDshiftRL_0(v) case OpARM64AND: - return rewriteValueARM64_OpARM64AND_0(v) || rewriteValueARM64_OpARM64AND_10(v) + return rewriteValueARM64_OpARM64AND_0(v) case OpARM64ANDconst: return rewriteValueARM64_OpARM64ANDconst_0(v) case OpARM64ANDshiftLL: @@ -156,9 +156,9 @@ func rewriteValueARM64(v *Value) bool { case OpARM64MADDW: return rewriteValueARM64_OpARM64MADDW_0(v) || rewriteValueARM64_OpARM64MADDW_10(v) || rewriteValueARM64_OpARM64MADDW_20(v) case OpARM64MNEG: - return rewriteValueARM64_OpARM64MNEG_0(v) || rewriteValueARM64_OpARM64MNEG_10(v) || rewriteValueARM64_OpARM64MNEG_20(v) + return rewriteValueARM64_OpARM64MNEG_0(v) || rewriteValueARM64_OpARM64MNEG_10(v) case OpARM64MNEGW: - return rewriteValueARM64_OpARM64MNEGW_0(v) || rewriteValueARM64_OpARM64MNEGW_10(v) || rewriteValueARM64_OpARM64MNEGW_20(v) + return rewriteValueARM64_OpARM64MNEGW_0(v) || 
rewriteValueARM64_OpARM64MNEGW_10(v) case OpARM64MOD: return rewriteValueARM64_OpARM64MOD_0(v) case OpARM64MODW: @@ -176,7 +176,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64MOVBreg: return rewriteValueARM64_OpARM64MOVBreg_0(v) case OpARM64MOVBstore: - return rewriteValueARM64_OpARM64MOVBstore_0(v) || rewriteValueARM64_OpARM64MOVBstore_10(v) || rewriteValueARM64_OpARM64MOVBstore_20(v) || rewriteValueARM64_OpARM64MOVBstore_30(v) || rewriteValueARM64_OpARM64MOVBstore_40(v) || rewriteValueARM64_OpARM64MOVBstore_50(v) + return rewriteValueARM64_OpARM64MOVBstore_0(v) || rewriteValueARM64_OpARM64MOVBstore_10(v) || rewriteValueARM64_OpARM64MOVBstore_20(v) || rewriteValueARM64_OpARM64MOVBstore_30(v) case OpARM64MOVBstoreidx: return rewriteValueARM64_OpARM64MOVBstoreidx_0(v) || rewriteValueARM64_OpARM64MOVBstoreidx_10(v) case OpARM64MOVBstorezero: @@ -266,9 +266,9 @@ func rewriteValueARM64(v *Value) bool { case OpARM64MSUBW: return rewriteValueARM64_OpARM64MSUBW_0(v) || rewriteValueARM64_OpARM64MSUBW_10(v) || rewriteValueARM64_OpARM64MSUBW_20(v) case OpARM64MUL: - return rewriteValueARM64_OpARM64MUL_0(v) || rewriteValueARM64_OpARM64MUL_10(v) || rewriteValueARM64_OpARM64MUL_20(v) + return rewriteValueARM64_OpARM64MUL_0(v) || rewriteValueARM64_OpARM64MUL_10(v) case OpARM64MULW: - return rewriteValueARM64_OpARM64MULW_0(v) || rewriteValueARM64_OpARM64MULW_10(v) || rewriteValueARM64_OpARM64MULW_20(v) + return rewriteValueARM64_OpARM64MULW_0(v) || rewriteValueARM64_OpARM64MULW_10(v) case OpARM64MVN: return rewriteValueARM64_OpARM64MVN_0(v) case OpARM64MVNshiftLL: @@ -288,7 +288,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64NotEqual: return rewriteValueARM64_OpARM64NotEqual_0(v) case OpARM64OR: - return rewriteValueARM64_OpARM64OR_0(v) || rewriteValueARM64_OpARM64OR_10(v) || rewriteValueARM64_OpARM64OR_20(v) || rewriteValueARM64_OpARM64OR_30(v) || rewriteValueARM64_OpARM64OR_40(v) || rewriteValueARM64_OpARM64OR_50(v) + return rewriteValueARM64_OpARM64OR_0(v) || rewriteValueARM64_OpARM64OR_10(v) || rewriteValueARM64_OpARM64OR_20(v) case OpARM64ORN: return rewriteValueARM64_OpARM64ORN_0(v) case OpARM64ORNshiftLL: @@ -300,7 +300,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64ORconst: return rewriteValueARM64_OpARM64ORconst_0(v) case OpARM64ORshiftLL: - return rewriteValueARM64_OpARM64ORshiftLL_0(v) || rewriteValueARM64_OpARM64ORshiftLL_10(v) || rewriteValueARM64_OpARM64ORshiftLL_20(v) || rewriteValueARM64_OpARM64ORshiftLL_30(v) + return rewriteValueARM64_OpARM64ORshiftLL_0(v) || rewriteValueARM64_OpARM64ORshiftLL_10(v) || rewriteValueARM64_OpARM64ORshiftLL_20(v) case OpARM64ORshiftRA: return rewriteValueARM64_OpARM64ORshiftRA_0(v) case OpARM64ORshiftRL: @@ -362,7 +362,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64UMODW: return rewriteValueARM64_OpARM64UMODW_0(v) case OpARM64XOR: - return rewriteValueARM64_OpARM64XOR_0(v) || rewriteValueARM64_OpARM64XOR_10(v) + return rewriteValueARM64_OpARM64XOR_0(v) case OpARM64XORconst: return rewriteValueARM64_OpARM64XORconst_0(v) case OpARM64XORshiftLL: @@ -1010,498 +1010,289 @@ func rewriteValueARM64_OpARM64ADCSflags_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (ADD x (MOVDconst [c])) // result: (ADDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + 
c := v_1.AuxInt + v.reset(OpARM64ADDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64ADDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (MOVDconst [c]) x) - // result: (ADDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64ADDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD a l:(MUL x y)) // cond: l.Uses==1 && clobber(l) // result: (MADD a x y) for { _ = v.Args[1] - a := v.Args[0] - l := v.Args[1] - if l.Op != OpARM64MUL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpARM64MUL { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MADD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MADD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD l:(MUL x y) a) - // cond: l.Uses==1 && clobber(l) - // result: (MADD a x y) - for { - a := v.Args[1] - l := v.Args[0] - if l.Op != OpARM64MUL { - break - } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MADD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD a l:(MNEG x y)) // cond: l.Uses==1 && clobber(l) // result: (MSUB a x y) for { _ = v.Args[1] - a := v.Args[0] - l := v.Args[1] - if l.Op != OpARM64MNEG { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpARM64MNEG { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MSUB) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MSUB) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD l:(MNEG x y) a) - // cond: l.Uses==1 && clobber(l) - // result: (MSUB a x y) - for { - a := v.Args[1] - l := v.Args[0] - if l.Op != OpARM64MNEG { - break - } - y := l.Args[1] - x := l.Args[0] - if !(l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MSUB) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD a l:(MULW x y)) // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) // result: (MADDW a x y) for { _ = v.Args[1] - a := v.Args[0] - l := v.Args[1] - if l.Op != OpARM64MULW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpARM64MULW { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MADDW) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := l.Args[1] - x := l.Args[0] - if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MADDW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD l:(MULW x y) a) - // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) - // result: (MADDW a x y) - for { - a := v.Args[1] - l := v.Args[0] - if l.Op != OpARM64MULW { - break - } - y := l.Args[1] - x := l.Args[0] - if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MADDW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD a l:(MNEGW x y)) // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) // result: 
(MSUBW a x y) for { _ = v.Args[1] - a := v.Args[0] - l := v.Args[1] - if l.Op != OpARM64MNEGW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + l := v.Args[1^_i0] + if l.Op != OpARM64MNEGW { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MSUBW) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := l.Args[1] - x := l.Args[0] - if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MSUBW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (ADD l:(MNEGW x y) a) - // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) - // result: (MSUBW a x y) - for { - a := v.Args[1] - l := v.Args[0] - if l.Op != OpARM64MNEGW { - break - } - y := l.Args[1] - x := l.Args[0] - if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { - break - } - v.reset(OpARM64MSUBW) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (ADD x (NEG y)) // result: (SUB x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64NEG { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64NEG { + continue + } + y := v_1.Args[0] + v.reset(OpARM64SUB) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARM64SUB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (NEG y) x) - // result: (SUB x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64NEG { - break - } - y := v_0.Args[0] - v.reset(OpARM64SUB) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (ADDshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SLLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (ADD x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ADDshiftLL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (ADD x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (ADDshiftRL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ADDshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ADDshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (ADD x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ADDshiftRL x0 y [c]) - for { - x0 
:= v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ADDshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (ADD x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (ADDshiftRA x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRAconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ADDshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ADDshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (ADD x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ADDshiftRA x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ADDshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (ADD (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x (NEG y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { - 
break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (ADD (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SLL x (ANDconst [63] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (ROR x (NEG y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt64 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SRL || v_0_0.Type != typ.UInt64 { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 64 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 63 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 64 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 63 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true + break } return false } -func rewriteValueARM64_OpARM64ADD_20(v *Value) bool { +func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ADD (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) @@ -1509,419 +1300,223 @@ func rewriteValueARM64_OpARM64ADD_20(v *Value) bool { // result: (ROR x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := 
v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SRL x (ANDconst [63] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (ROR x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt64 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 64 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || 
v_0_0_1_1.AuxInt != 63 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 64 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 63 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SRL || v_1.Type != typ.UInt64 { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x (NEG y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := 
v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (ADD (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SLL x (ANDconst [31] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (RORW x (NEG y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt32 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SRL || v_0_0.Type != typ.UInt32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpARM64MOVWUreg { - break - } - x := v_0_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 32 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 31 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 32 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 31 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (ADD (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64MOVWUreg { + continue + } + x := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { + 
continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64MOVWUreg { - break - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SRL (MOVWUreg x) (ANDconst [31] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (RORW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt32 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 32 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 31 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - 
break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 32 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 31 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SRL || v_1.Type != typ.UInt32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVWUreg || x != v_1_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -2242,30 +1837,19 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // result: (ANDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64ANDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [c]) x) - // result: (ANDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (AND x x) // result: x @@ -2283,156 +1867,91 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // result: (BIC x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MVN { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MVN { + continue + } + y := v_1.Args[0] + v.reset(OpARM64BIC) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARM64BIC) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (MVN y) x) - // result: (BIC x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { - break - } - y := v_0.Args[0] - v.reset(OpARM64BIC) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SLLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ANDshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (AND x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ANDshiftLL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ANDshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (AND x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftRL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { - 
break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ANDshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (AND x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ANDshiftRL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ANDshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (AND x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftRA x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRAconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ANDshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64AND_10(v *Value) bool { - // match: (AND x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ANDshiftRA x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ANDshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } return false } @@ -2954,153 +2473,91 @@ func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { // result: (CMNconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64CMNconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64CMNconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (CMN (MOVDconst [c]) x) - // result: (CMNconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64CMNconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (CMN x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (CMNshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SLLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64CMNshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64CMNshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (CMN x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (CMNshiftLL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break - } - c := 
x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64CMNshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (CMN x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (CMNshiftRL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64CMNshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64CMNshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (CMN x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (CMNshiftRL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64CMNshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (CMN x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (CMNshiftRA x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRAconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64CMNshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64CMNshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (CMN x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (CMNshiftRA x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64CMNshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } return false } @@ -3109,30 +2566,19 @@ func rewriteValueARM64_OpARM64CMNW_0(v *Value) bool { // result: (CMNWconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64CMNWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64CMNWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (CMNW (MOVDconst [c]) x) - // result: (CMNWconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64CMNWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -4536,67 +3982,41 @@ func rewriteValueARM64_OpARM64FADDD_0(v *Value) bool { // result: (FMADDD a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FMULD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64FMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + v.reset(OpARM64FMADDD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return 
true - } - // match: (FADDD (FMULD x y) a) - // result: (FMADDD a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FMULD { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (FADDD a (FNMULD x y)) // result: (FMSUBD a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNMULD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64FNMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + v.reset(OpARM64FMSUBD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpARM64FMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FADDD (FNMULD x y) a) - // result: (FMSUBD a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNMULD { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpARM64FMSUBD) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -4605,67 +4025,41 @@ func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool { // result: (FMADDS a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FMULS { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64FMULS { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + v.reset(OpARM64FMADDS) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpARM64FMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FADDS (FMULS x y) a) - // result: (FMADDS a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FMULS { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpARM64FMADDS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (FADDS a (FNMULS x y)) // result: (FMSUBS a x y) for { _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNMULS { - break + for _i0 := 0; _i0 <= 1; _i0++ { + a := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64FNMULS { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + v.reset(OpARM64FMSUBS) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FADDS (FNMULS x y) a) - // result: (FMSUBS a x y) - for { - a := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNMULS { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -5314,64 +4708,42 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx_0(v *Value) bool { func rewriteValueARM64_OpARM64FMULD_0(v *Value) bool { // match: (FMULD (FNEGD x) y) // result: (FNMULD x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGD { - break - } - x := v_0.Args[0] - v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FMULD y (FNEGD x)) - // result: (FNMULD x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64FNEGD { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARM64FNMULD) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARM64FNMULD) - v.AddArg(x) - 
v.AddArg(y) - return true + break } return false } func rewriteValueARM64_OpARM64FMULS_0(v *Value) bool { // match: (FMULS (FNEGS x) y) // result: (FNMULS x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGS { - break - } - x := v_0.Args[0] - v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FMULS y (FNEGS x)) - // result: (FNMULS x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGS { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64FNEGS { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARM64FNMULS) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -5440,64 +4812,42 @@ func rewriteValueARM64_OpARM64FNEGS_0(v *Value) bool { func rewriteValueARM64_OpARM64FNMULD_0(v *Value) bool { // match: (FNMULD (FNEGD x) y) // result: (FMULD x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGD { - break - } - x := v_0.Args[0] - v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FNMULD y (FNEGD x)) - // result: (FMULD x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64FNEGD { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARM64FMULD) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } func rewriteValueARM64_OpARM64FNMULS_0(v *Value) bool { // match: (FNMULS (FNEGS x) y) // result: (FMULS x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGS { - break - } - x := v_0.Args[0] - v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FNMULS y (FNEGS x)) - // result: (FMULS x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGS { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64FNEGS { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARM64FMULS) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -7229,443 +6579,259 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MNEG (MOVDconst [-1]) x) - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (MNEG _ (MOVDconst [0])) // result: (MOVDconst [0]) for { _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MNEG 
(MOVDconst [0]) _) - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != 0 { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true + break } // match: (MNEG x (MOVDconst [1])) // result: (NEG x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + continue + } + v.reset(OpARM64NEG) + v.AddArg(x) + return true } - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } - // match: (MNEG (MOVDconst [1]) x) - // result: (NEG x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != 1 { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true + break } // match: (MNEG x (MOVDconst [c])) // cond: isPowerOfTwo(c) // result: (NEG (SLLconst [log2(c)] x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (NEG (SLLconst [log2(c)] x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (MNEG x (MOVDconst [c])) // cond: isPowerOfTwo(c-1) && c >= 3 // result: (NEG (ADDshiftLL x x [log2(c-1)])) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && c >= 3) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && c >= 3) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true + break } - // match: (MNEG (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (NEG (ADDshiftLL x x [log2(c-1)])) + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && c >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && c >= 7) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + 
v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_0.AuxInt - if !(isPowerOfTwo(c-1) && c >= 3) { - break + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3)) { + continue + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7)) { + continue + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break } return false } func rewriteValueARM64_OpARM64MNEG_10(v *Value) bool { - b := v.Block - // match: (MNEG x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && c >= 7) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c+1) && c 
>= 7) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (MNEG x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEG x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) - for { - _ = v.Args[1] - 
x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM64_OpARM64MNEG_20(v *Value) bool { // match: (MNEG (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [-c*d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -c * d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -c * d - return true - } - // match: (MNEG (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [-c*d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -c * d - return true + break } return false } @@ -7676,472 +6842,273 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == -1) { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: int32(c)==-1 - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (MNEGW _ (MOVDconst [c])) // cond: int32(c)==0 // result: (MOVDconst [0]) for { _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == 0) { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true } - c := v_1.AuxInt - if !(int32(c) == 0) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MNEGW (MOVDconst [c]) _) - // cond: int32(c)==0 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if 
!(int32(c) == 0) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true + break } // match: (MNEGW x (MOVDconst [c])) // cond: int32(c)==1 // result: (NEG x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == 1) { + continue + } + v.reset(OpARM64NEG) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(int32(c) == 1) { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: int32(c)==1 - // result: (NEG x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(int32(c) == 1) { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true + break } // match: (MNEGW x (MOVDconst [c])) // cond: isPowerOfTwo(c) // result: (NEG (SLLconst [log2(c)] x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (NEG (SLLconst [log2(c)] x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (MNEGW x (MOVDconst [c])) // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (NEG (ADDshiftLL x x [log2(c-1)])) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true + break } - // match: (MNEGW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (NEG (ADDshiftLL x x [log2(c-1)])) + // match: (MNEGW x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v1 := 
b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_0.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break } return false } func rewriteValueARM64_OpARM64MNEGW_10(v *Value) bool { - b := v.Block - // match: (MNEGW x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: 
isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%7 == 0 && 
isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM64_OpARM64MNEGW_20(v *Value) bool { // match: (MNEGW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [-int64(int32(c)*int32(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -int64(int32(c) * int32(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -int64(int32(c) * int32(d)) - return true - } - // match: (MNEGW (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [-int64(int32(c)*int32(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -int64(int32(c) * int32(d)) - return true + break } return false } @@ -8950,66 +7917,33 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { + continue + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := 
x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx0 ptr0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) @@ -9054,66 +7988,33 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) { + continue + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx0 ptr0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && 
s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) @@ -9158,66 +8059,33 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) { + continue + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx0 ptr0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) @@ -9253,9 +8121,6 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVHstoreidx ptr1 idx1 w mem) @@ -9269,74 +8134,37 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 
:= v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { + continue + } + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx0 ptr0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) @@ -9383,71 +8211,40 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { + continue + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - 
} - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx0 ptr0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w0 mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { + b := v.Block // match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) // result: (MOVHstore [i-1] {s} ptr0 w0 mem) @@ -9497,78 +8294,39 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { + continue + } + bfc := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64UBFX { + continue + } + bfc2 := w0.AuxInt + if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true } - bfc := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64UBFX { - break - } - bfc2 := w0.AuxInt - if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == 
getARM64BFlsb(bfc)-8 && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx0 ptr0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w0 mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - bfc := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64UBFX { - break - } - bfc2 := w0.AuxInt - if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) @@ -9623,90 +8381,43 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { + continue + } + j := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { + continue + } + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 { + continue + } + w0_0 := w0.Args[0] + if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true } - j := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 { - break - } - w0_0 := w0.Args[0] - if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { - b := v.Block - // 
match: (MOVBstore [1] {s} (ADD idx0 ptr0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w0 mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 { - break - } - w0_0 := w0.Args[0] - if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w) x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w) x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w) x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem)))))))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) @@ -9891,137 +8602,35 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] { + continue + } + x6 := x5.Args[2] + if x6.Op != OpARM64MOVBstoreidx { + continue + } + mem := x6.Args[3] + ptr0 := x6.Args[0] + idx0 := x6.Args[1] + x6_2 := x6.Args[2] + if x6_2.Op != OpARM64SRLconst || x6_2.AuxInt != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { + continue + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - x6 := x5.Args[2] - if x6.Op != OpARM64MOVBstoreidx { - break - } - mem := x6.Args[3] - ptr0 := x6.Args[0] - idx0 := x6.Args[1] - x6_2 := x6.Args[2] - if x6_2.Op != OpARM64SRLconst || x6_2.AuxInt != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD idx1 ptr1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVDstoreidx ptr0 idx0 (REV w) mem) - for { - if v.AuxInt != 7 { - break - } - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore || x0.AuxInt != 6 || x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] { - break - } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore || x1.AuxInt != 5 || x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { - break - } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstore || x2.AuxInt != 4 || x2.Aux != s { - break - } - _ = x2.Args[2] - if p != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] { - break - } - x3 := x2.Args[2] - if x3.Op != OpARM64MOVBstore || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[2] - if p != x3.Args[0] { - break - } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] { - break - } - x4 := x3.Args[2] - if x4.Op != OpARM64MOVBstore || x4.AuxInt != 2 || x4.Aux != s { - break - } - _ = x4.Args[2] - if p != x4.Args[0] { - break - } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] { - break - } - x5 := x4.Args[2] - if x5.Op != OpARM64MOVBstore || x5.AuxInt != 1 || x5.Aux != s { - break - } - _ = x5.Args[2] - p1 := x5.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] { - break - } - x6 := x5.Args[2] - if x6.Op != OpARM64MOVBstoreidx { - break - } - mem := x6.Args[3] - ptr0 := x6.Args[0] - idx0 := x6.Args[1] - x6_2 := x6.Args[2] - if x6_2.Op != OpARM64SRLconst || x6_2.AuxInt != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } 
// match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) @@ -10110,89 +8719,35 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] { + continue + } + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstoreidx { + continue + } + mem := x2.Args[3] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { + continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { - break - } - mem := x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD idx1 ptr1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) - for { - if v.AuxInt != 3 { - break - } - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] { - break - } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[2] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] { - break - } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { - break - } - mem := x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64UBFX || x2_2.AuxInt 
!= armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w)) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) @@ -10297,113 +8852,47 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 { + continue + } + x1_1_0 := x1_1.Args[0] + if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] { + continue + } + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstoreidx { + continue + } + mem := x2.Args[3] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 { + continue + } + x2_2_0 := x2_2.Args[0] + if x2_2_0.Op != OpARM64MOVDreg || w != x2_2_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { + continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - x1_1_0 := x1_1.Args[0] - if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] { - break - } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { - break - } - mem := x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 { - break - } - x2_2_0 := x2_2.Args[0] - if x2_2_0.Op != OpARM64MOVDreg || w != x2_2_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD idx1 ptr1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) - for { - if v.AuxInt != 3 { - break - } - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s { - break 
- } - _ = x0.Args[2] - if p != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 { - break - } - x0_1_0 := x0_1.Args[0] - if x0_1_0.Op != OpARM64MOVDreg || w != x0_1_0.Args[0] { - break - } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[2] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 { - break - } - x1_1_0 := x1_1.Args[0] - if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] { - break - } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { - break - } - mem := x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 { - break - } - x2_2_0 := x2_2.Args[0] - if x2_2_0.Op != OpARM64MOVDreg || w != x2_2_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } return false } -func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { +func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { b := v.Block // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) @@ -10492,89 +8981,35 @@ func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { + continue + } + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstoreidx { + continue + } + mem := x2.Args[3] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { + continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { - break - } - mem := x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return 
true - } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD idx1 ptr1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) - for { - if v.AuxInt != 3 { - break - } - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] { - break - } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[2] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { - break - } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { - break - } - mem := x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -10620,64 +9055,32 @@ func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx1 := v_0.Args[1] - ptr1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := v_0.Args[_i0] + idx1 := v_0.Args[1^_i0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx1 ptr1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && 
isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr1 := v_0.Args[1] - idx1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -10723,64 +9126,32 @@ func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx1 := v_0.Args[1] - ptr1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := v_0.Args[_i0] + idx1 := v_0.Args[1^_i0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx1 ptr1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr1 := v_0.Args[1] - idx1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg 
w)) mem)) // cond: x.Uses == 1 && clobber(x) @@ -10817,10 +9188,6 @@ func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBstore_50(v *Value) bool { - b := v.Block // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) @@ -10834,72 +9201,36 @@ func rewriteValueARM64_OpARM64MOVBstore_50(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx1 := v_0.Args[1] - ptr1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := v_0.Args[_i0] + idx1 := v_0.Args[1^_i0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 { + continue + } + x_2_0 := x_2.Args[0] + if x_2_0.Op != OpARM64MOVDreg || w != x_2_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 { - break - } - x_2_0 := x_2.Args[0] - if x_2_0.Op != OpARM64MOVDreg || w != x_2_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx1 ptr1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr1 := v_0.Args[1] - idx1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 { - break - } - x_2_0 := x_2.Args[0] - if x_2_0.Op != OpARM64MOVDreg || w != x_2_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -10945,64 +9276,32 @@ func rewriteValueARM64_OpARM64MOVBstore_50(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx1 := v_0.Args[1] - ptr1 := v_0.Args[0] - w := v.Args[1] - x := 
v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := v_0.Args[_i0] + idx1 := v_0.Args[1^_i0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + continue + } + mem := x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD idx1 ptr1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr1 := v_0.Args[1] - idx1 := v_0.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - mem := x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true + break } return false } @@ -11509,54 +9808,27 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVBstorezeroidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + x := v.Args[1] + if x.Op != OpARM64MOVBstorezeroidx { + continue + } + mem := x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVHstorezeroidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(mem) + return true } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true - } - // match: (MOVBstorezero [1] {s} (ADD idx0 ptr0) x:(MOVBstorezeroidx ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) 
|| isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstorezeroidx ptr1 idx1 mem) - for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVBstorezeroidx { - break - } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true + break } return false } @@ -12334,56 +10606,28 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { if p0.Op != OpARM64ADD { break } - idx0 := p0.Args[1] - ptr0 := p0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVDstorezeroidx { - break + _ = p0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := p0.Args[_i0] + idx0 := p0.Args[1^_i0] + x := v.Args[1] + if x.Op != OpARM64MOVDstorezeroidx { + continue + } + mem := x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = 0 + v.Aux = s + v.AddArg(p0) + v.AddArg(mem) + return true } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = 0 - v.Aux = s - v.AddArg(p0) - v.AddArg(mem) - return true - } - // match: (MOVDstorezero [8] {s} p0:(ADD idx0 ptr0) x:(MOVDstorezeroidx ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVQstorezero [0] {s} p0 mem) - for { - if v.AuxInt != 8 { - break - } - s := v.Aux - _ = v.Args[1] - p0 := v.Args[0] - if p0.Op != OpARM64ADD { - break - } - ptr0 := p0.Args[1] - idx0 := p0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVDstorezeroidx { - break - } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = 0 - v.Aux = s - v.AddArg(p0) - v.AddArg(mem) - return true + break } // match: (MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -13608,66 +11852,33 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { + continue + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { 
+ continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstore [2] {s} (ADD idx0 ptr0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -13752,66 +11963,33 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) { + continue + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstore [2] {s} (ADD idx0 ptr0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - 
idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -13900,79 +12078,38 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { + continue + } + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } - // match: (MOVHstore [2] {s} (ADD idx0 ptr0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { - b := v.Block // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && 
isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) @@ -14062,71 +12199,40 @@ func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { + continue + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstore [2] {s} (ADD idx0 ptr0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w0 mem) - for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { + b := v.Block // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w0 mem) @@ -14654,54 +12760,27 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVHstorezeroidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + x := v.Args[1] + if x.Op != OpARM64MOVHstorezeroidx { + continue + } + mem := x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && 
isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(mem) + return true } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true - } - // match: (MOVHstorezero [2] {s} (ADD idx0 ptr0) x:(MOVHstorezeroidx ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstorezeroidx ptr1 idx1 mem) - for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVHstorezeroidx { - break - } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true + break } // match: (MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -16125,71 +14204,38 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 { + continue + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true + break } return false } func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { b := v.Block - // match: (MOVWstore [4] {s} (ADD idx0 ptr0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVDstoreidx ptr1 idx1 w mem) - for { - if v.AuxInt != 4 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x 
:= v.Args[2] - if x.Op != OpARM64MOVWstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) - return true - } // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) // result: (MOVDstoreidx ptr1 (SLLconst [2] idx1) w mem) @@ -16275,70 +14321,35 @@ func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := v_0.Args[1] - ptr0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { + continue + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstoreidx { + continue + } + mem := x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstore [4] {s} (ADD idx0 ptr0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVDstoreidx ptr1 idx1 w0 mem) - for { - if v.AuxInt != 4 { - break - } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx { - break - } - mem := x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -16750,54 +14761,27 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { if v_0.Op != OpARM64ADD { break } - idx0 := 
v_0.Args[1] - ptr0 := v_0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVWstorezeroidx { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr0 := v_0.Args[_i0] + idx0 := v_0.Args[1^_i0] + x := v.Args[1] + if x.Op != OpARM64MOVWstorezeroidx { + continue + } + mem := x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + continue + } + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(mem) + return true } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true - } - // match: (MOVWstorezero [4] {s} (ADD idx0 ptr0) x:(MOVWstorezeroidx ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVDstorezeroidx ptr1 idx1 mem) - for { - if v.AuxInt != 4 { - break - } - s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - ptr0 := v_0.Args[1] - idx0 := v_0.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVWstorezeroidx { - break - } - mem := x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true + break } // match: (MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -17911,942 +15895,559 @@ func rewriteValueARM64_OpARM64MSUBW_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { + b := v.Block // match: (MUL (NEG x) y) // result: (MNEG x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64NEG { - break - } - x := v_0.Args[0] - v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MUL y (NEG x)) - // result: (MNEG x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64NEG { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64NEG { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARM64MNEG) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (MUL x (MOVDconst [-1])) // result: (NEG x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { + continue + } + v.reset(OpARM64NEG) + v.AddArg(x) + return true } - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } - // match: (MUL (MOVDconst [-1]) x) - // result: (NEG x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != -1 { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true + break } // match: (MUL _ (MOVDconst [0])) // result: (MOVDconst [0]) for { _ = v.Args[1] - v_1 := v.Args[1] - if 
v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MUL (MOVDconst [0]) _) - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != 0 { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true + break } // match: (MUL x (MOVDconst [1])) // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MUL (MOVDconst [1]) x) - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (MUL x (MOVDconst [c])) // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && c >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && c >= 3) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break + break + } + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && c >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && c >= 7) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 
1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break } return false } func rewriteValueARM64_OpARM64MUL_10(v *Value) bool { - b := v.Block - // match: (MUL x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && c >= 3) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c-1) && c >= 3) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MUL x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && c >= 7) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c+1) && c >= 7) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MUL x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVDconst [c]) x) - // cond: c%3 == 0 
&& isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM64_OpARM64MUL_20(v *Value) bool { b := v.Block // match: (MUL x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MUL (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: 
(SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (MUL (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c*d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = c * d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c * d - return true - } - // match: (MUL (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c*d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c * d - return true + break } return false } func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { + b := v.Block // match: (MULW (NEG x) y) // result: (MNEGW x y) - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64NEG { - break - } - x := v_0.Args[0] - v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MULW y (NEG x)) - // result: (MNEGW x y) for { _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64NEG { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64NEG { + continue + } + x := v_0.Args[0] + y := v.Args[1^_i0] + v.reset(OpARM64MNEGW) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_1.Args[0] - v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (MULW x (MOVDconst [c])) // cond: int32(c)==-1 // result: (NEG x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == -1) { + continue + } + v.reset(OpARM64NEG) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: int32(c)==-1 - // result: (NEG x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true + break } // match: (MULW _ (MOVDconst [c])) // cond: int32(c)==0 // result: (MOVDconst [0]) for { _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == 0) { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true } - c := v_1.AuxInt - if !(int32(c) == 0) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MULW (MOVDconst [c]) _) - // cond: int32(c)==0 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(int32(c) == 0) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true + break } // match: (MULW x (MOVDconst [c])) // cond: int32(c)==1 // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(int32(c) == 1) { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(int32(c) == 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: int32(c)==1 - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(int32(c) == 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (MULW x (MOVDconst [c])) // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break + break + } + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // 
result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break } return false } func rewriteValueARM64_OpARM64MULW_10(v *Value) bool { - b := v.Block - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && 
is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM64_OpARM64MULW_20(v *Value) bool { b := v.Block // match: (MULW x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt 
= 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (MULW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [int64(int32(c)*int32(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c) * int32(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) - return true - } - // match: (MULW (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [int64(int32(c)*int32(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) - return true + break } return false } @@ -19183,34 +16784,25 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64OR_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (OR x (MOVDconst [c])) // result: (ORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64ORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVDconst [c]) x) - // result: (ORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (OR x x) // result: x @@ -19228,824 +16820,450 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // result: (ORN x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MVN { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MVN { + continue + } + y := v_1.Args[0] + v.reset(OpARM64ORN) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARM64ORN) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (MVN y) x) - // result: (ORN x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { - break - } - y := v_0.Args[0] - v.reset(OpARM64ORN) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (OR x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (ORshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := 
v.Args[1^_i0] + if x1.Op != OpARM64SLLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (OR x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ORshiftLL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (OR x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (ORshiftRL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ORshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (OR x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ORshiftRL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (OR x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (ORshiftRA x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRAconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ORshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64OR_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ORshiftRA x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64ORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (OR (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x (NEG y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { 
+ continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (OR (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SLL x (ANDconst [63] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (ROR x (NEG y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt64 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SRL || v_0_0.Type != typ.UInt64 { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 64 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 63 { 
- break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 64 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 63 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (OR (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || 
y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SRL x (ANDconst [63] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (ROR x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt64 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 64 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 63 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 64 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 63 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SRL || v_1.Type != typ.UInt64 { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (OR (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x (NEG y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != 
OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (OR (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SLL x (ANDconst [31] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (RORW x (NEG y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt32 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SRL || v_0_0.Type != typ.UInt32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpARM64MOVWUreg { - break - } - x := v_0_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 32 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 31 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt 
!= 32 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 31 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (OR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64MOVWUreg { + continue + } + x := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64MOVWUreg { - break - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if 
v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SRL (MOVWUreg x) (ANDconst [31] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (RORW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt32 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 32 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 31 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 32 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 31 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SRL || v_1.Type != typ.UInt32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVWUreg || x != v_1_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) - return true + break } + return false +} +func rewriteValueARM64_OpARM64OR_10(v *Value) bool { + b := v.Block // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) // cond: ac == ^((1< o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) @@ -20053,183 +17271,94 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { + continue + } + 
y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + continue + } + i3 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + continue + } + i1 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] { + continue + } + y3 := v.Args[1^_i0] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + continue + } + i0 := x3.AuxInt + if x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.Aux = s + v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i3 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i1 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i0 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)) 
o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i0 := x3.AuxInt - s := x3.Aux - mem := x3.Args[1] - p := x3.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i3 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i1 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) @@ -20237,335 +17366,89 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if 
o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 { + continue + } + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s { + continue + } + _ = x2.Args[1] + p1 := x2.Args[0] + if p1.Op != OpARM64ADD { + continue + } + _ = p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + ptr1 := p1.Args[_i1] + idx1 := p1.Args[1^_i1] + if mem != x2.Args[1] { + continue + } + y3 := v.Args[1^_i0] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + continue + } + _ = x3.Args[2] + ptr0 := x3.Args[0] + idx0 := x3.Args[1] + if mem != x3.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) + return true + } } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 { - break - } - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s { - break - } - _ = x2.Args[1] - p1 := x2.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - ptr0 := x3.Args[0] - idx0 := x3.Args[1] - if mem != x3.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, 
ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 { - break - } - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s { - break - } - _ = x2.Args[1] - p1 := x2.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - ptr0 := x3.Args[0] - idx0 := x3.Args[1] - if mem != x3.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} 
p1:(ADD ptr1 idx1) mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - mem := x3.Args[2] - ptr0 := x3.Args[0] - idx0 := x3.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 { - break - } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - if mem != x0.Args[1] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s { - break - } - _ = x2.Args[1] - p1 := x2.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break 
- } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - mem := x3.Args[2] - ptr0 := x3.Args[0] - idx0 := x3.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 { - break - } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - if mem != x0.Args[1] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s { - break - } - _ = x2.Args[1] - p1 := x2.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) @@ -20573,179 +17456,90 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + continue + } + mem := x0.Args[2] + ptr := x0.Args[0] + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 3 { + continue + } + idx := x0_1.Args[0] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + continue + } + _ = x1.Args[2] + if ptr != x1.Args[0] { 
+ continue + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] { + continue + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + continue + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + continue + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] { + continue + } + y3 := v.Args[1^_i0] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + continue + } + _ = x3.Args[2] + if ptr != x3.Args[0] || idx != x3.Args[1] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 3 { - break - } - idx := x0_1.Args[0] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - if ptr != x3.Args[0] || idx != x3.Args[1] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64OR_30(v *Value) bool { - b := v.Block - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] 
idx) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr idx mem) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - mem := x3.Args[2] - ptr := x3.Args[0] - idx := x3.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - if ptr != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 3 || idx != x0_1.Args[0] || mem != x0.Args[2] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) 
&& clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) @@ -20753,351 +17547,178 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i7 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i6 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i5 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i4 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { - break - } - i3 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { - break - } - i2 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { - break - } - i1 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - i0 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} (OffPtr [i0] p) mem) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - i0 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i7 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i6 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := 
y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i5 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i4 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { - break - } - i3 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { - break - } - i2 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { - break - } - i1 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(x6.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { + continue + } + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { + continue + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { + continue + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { + continue + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + continue + } + i7 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + continue + } + i6 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y2 := o4.Args[1] + if y2.Op != 
OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + continue + } + i5 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] { + continue + } + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + continue + } + i4 := x3.AuxInt + if x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] { + continue + } + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { + continue + } + i3 := x4.AuxInt + if x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] { + continue + } + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + continue + } + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload { + continue + } + i2 := x5.AuxInt + if x5.Aux != s { + continue + } + _ = x5.Args[1] + if p != x5.Args[0] || mem != x5.Args[1] { + continue + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + continue + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload { + continue + } + i1 := x6.AuxInt + if x6.Aux != s { + continue + } + _ = x6.Args[1] + if p != x6.Args[0] || mem != x6.Args[1] { + continue + } + y7 := v.Args[1^_i0] + if y7.Op != OpARM64MOVDnop { + continue + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload { + continue + } + i0 := x7.AuxInt + if x7.Aux != s { + continue + } + _ = x7.Args[1] + if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.Aux = s + v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && 
o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) @@ -21105,607 +17726,157 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 { - break - } - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s { - break - } - _ = x6.Args[1] - p1 := x6.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - _ = x7.Args[2] - ptr0 := x7.Args[0] - idx0 := x7.Args[1] - if mem != x7.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && 
o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 { - break - } - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt 
!= 5 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s { - break - } - _ = x6.Args[1] - p1 := x6.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - _ = x7.Args[2] - ptr0 := x7.Args[0] - idx0 := x7.Args[1] - if mem != x7.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && 
clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - mem := x7.Args[2] - ptr0 := x7.Args[0] - idx0 := x7.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 { - break - } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - if mem != x0.Args[1] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s { - break - } - _ = x6.Args[1] - p1 := x6.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x6.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - mem := x7.Args[2] - ptr0 := x7.Args[0] - idx0 := x7.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 { - break - } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - if mem != x0.Args[1] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt 
!= 5 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s { - break - } - _ = x6.Args[1] - p1 := x6.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x6.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { + continue + } + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { + continue + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { + continue + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { + continue + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 { + continue + } + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s { 
+ continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] { + continue + } + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] { + continue + } + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] { + continue + } + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + continue + } + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s { + continue + } + _ = x5.Args[1] + if p != x5.Args[0] || mem != x5.Args[1] { + continue + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + continue + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s { + continue + } + _ = x6.Args[1] + p1 := x6.Args[0] + if p1.Op != OpARM64ADD { + continue + } + _ = p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + ptr1 := p1.Args[_i1] + idx1 := p1.Args[1^_i1] + if mem != x6.Args[1] { + continue + } + y7 := v.Args[1^_i0] + if y7.Op != OpARM64MOVDnop { + continue + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUloadidx { + continue + } + _ = x7.Args[2] + ptr0 := x7.Args[0] + idx0 := x7.Args[1] + if mem != x7.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) + return true + } + } + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && 
mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) @@ -21713,343 +17884,174 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 7 { - break - } - idx := x0_1.Args[0] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - if ptr != x3.Args[0] { - break - } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { - break - } - _ = x4.Args[2] - if ptr != x4.Args[0] { - break - } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUloadidx { - break - } - _ = x5.Args[2] - if ptr != x5.Args[0] { - break - } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUloadidx { - break - } - _ = x6.Args[2] - if ptr != x6.Args[0] { - break - } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - _ = x7.Args[2] - if ptr != x7.Args[0] || idx != x7.Args[1] || mem != x7.Args[2] 
|| !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr idx mem) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - mem := x7.Args[2] - ptr := x7.Args[0] - idx := x7.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - if ptr != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - 
if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 7 || idx != x0_1.Args[0] || mem != x0.Args[2] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - if ptr != x3.Args[0] { - break - } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { - break - } - _ = x4.Args[2] - if ptr != x4.Args[0] { - break - } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUloadidx { - break - } - _ = x5.Args[2] - if ptr != x5.Args[0] { - break - } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUloadidx { - break - } - _ = x6.Args[2] - if ptr != x6.Args[0] { - break - } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { + continue + } + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { + continue + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { + continue + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != 
OpARM64ORshiftLL || o5.AuxInt != 48 { + continue + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + continue + } + mem := x0.Args[2] + ptr := x0.Args[0] + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 7 { + continue + } + idx := x0_1.Args[0] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + continue + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + continue + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] { + continue + } + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + continue + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + continue + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] { + continue + } + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + continue + } + _ = x3.Args[2] + if ptr != x3.Args[0] { + continue + } + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] { + continue + } + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { + continue + } + _ = x4.Args[2] + if ptr != x4.Args[0] { + continue + } + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] { + continue + } + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + continue + } + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUloadidx { + continue + } + _ = x5.Args[2] + if ptr != x5.Args[0] { + continue + } + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] { + continue + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + continue + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUloadidx { + continue + } + _ = x6.Args[2] + if ptr != x6.Args[0] { + continue + } + x6_1 := x6.Args[1] + if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] { + continue + } + y7 := v.Args[1^_i0] + if y7.Op != OpARM64MOVDnop { + continue + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUloadidx { + continue + } + _ = x7.Args[2] + if ptr != x7.Args[0] || idx != x7.Args[1] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + 
v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) @@ -22057,191 +18059,96 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + continue + } + i2 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] { + continue + } + y3 := v.Args[1^_i0] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + continue + } + i3 := x3.AuxInt + if x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) + v1.Aux = s + v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - 
break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueARM64_OpARM64OR_40(v *Value) bool { - b := v.Block - // match: (OR y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt - s := x3.Aux - mem := x3.Args[1] - p := x3.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && 
y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) @@ -22249,523 +18156,188 @@ func rewriteValueARM64_OpARM64OR_40(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + continue + } + mem := x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { + continue + } + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { + continue + } + _ = p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + ptr1 := p1.Args[_i1] + idx1 := p1.Args[1^_i1] + if mem != x1.Args[1] { + continue + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { + continue + } + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { + continue + } + y3 := v.Args[1^_i0] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := 
b.NewValue0(x3.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != 
OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 { - break - } - s := x3.Aux - mem := x3.Args[1] - p := x3.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - if mem != x0.Args[2] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || 
x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 { - break - } - s := x3.Aux - mem := x3.Args[1] - p := x3.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - if mem != x0.Args[2] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, 
x1, x2, x3) - v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } + return false +} +func rewriteValueARM64_OpARM64OR_20(v *Value) bool { + b := v.Block // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr idx mem)) for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + continue + } + mem := x0.Args[2] + ptr := x0.Args[0] + idx := x0.Args[1] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + continue + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + continue + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] { + continue + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + continue + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + continue + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] { + continue + } + y3 := v.Args[1^_i0] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + continue + } + _ = x3.Args[2] + if ptr != x3.Args[0] { + continue + } + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { 
- break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr := x0.Args[0] - idx := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - if ptr != x3.Args[0] { - break - } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr idx mem)) - for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - mem := x3.Args[2] - ptr := x3.Args[0] - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 { - break - } - idx := x3_1.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - if ptr != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != 
x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) @@ -22773,355 +18345,180 @@ func rewriteValueARM64_OpARM64OR_40(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != 
x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && 
y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 
&& y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(x6.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { + continue + } + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { + continue + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { + continue + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { + continue + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + continue + } + i2 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] { + continue + } + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + continue + } + i3 := x3.AuxInt + if x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] { + continue + } + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { + continue + } + i4 := x4.AuxInt + if x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] { + continue + } + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + continue + } + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload { + continue + } + i5 := x5.AuxInt + if x5.Aux != s { + continue + } + _ = x5.Args[1] + if p != x5.Args[0] || mem != x5.Args[1] { + continue + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + continue + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload { + continue + } + i6 := x6.AuxInt + if x6.Aux != s { + continue + } + _ = x6.Args[1] + if p != x6.Args[0] || mem != x6.Args[1] { + continue + } + y7 := v.Args[1^_i0] + if y7.Op != OpARM64MOVDnop { + continue + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload 
{ + continue + } + i7 := x7.AuxInt + if x7.Aux != s { + continue + } + _ = x7.Args[1] + if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x7.Pos, OpARM64REV, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) + v1.Aux = s + v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [7] {s} p mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) @@ -23129,619 +18526,159 @@ func rewriteValueARM64_OpARM64OR_40(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - 
break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 || x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueARM64_OpARM64OR_50(v *Value) bool { - b := v.Block - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) 
y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [7] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break 
- } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 || x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 { - break - } - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - 
break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - if mem != x0.Args[2] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) 
y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 { - break - } - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - if mem != x0.Args[2] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != 
s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { + continue + } + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { + continue + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { + continue + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { + continue + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + continue + } + mem := x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { + continue + } + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { + continue + } + _ = p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + ptr1 := p1.Args[_i1] + idx1 := p1.Args[1^_i1] + if mem != x1.Args[1] { + continue + } + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { + continue + } + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { + continue + } + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] { + continue + } + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] { + continue + } + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + continue + } + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s { + continue + } + _ = x5.Args[1] + if p != x5.Args[0] || mem != x5.Args[1] { + continue + } + y6 := o0.Args[1] 
+ if y6.Op != OpARM64MOVDnop { + continue + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s { + continue + } + _ = x6.Args[1] + if p != x6.Args[0] || mem != x6.Args[1] { + continue + } + y7 := v.Args[1^_i0] + if y7.Op != OpARM64MOVDnop { + continue + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 || x7.Aux != s { + continue + } + _ = x7.Args[1] + if p != x7.Args[0] || mem != x7.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x7.Pos, OpARM64REV, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } + break } // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) @@ -23749,347 +18686,176 @@ func rewriteValueARM64_OpARM64OR_50(v *Value) bool { for { t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - 
if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr := x0.Args[0] - idx := x0.Args[1] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - break - } - _ = x3.Args[2] - if ptr != x3.Args[0] { - break - } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { - break - } - _ = x4.Args[2] - if ptr != x4.Args[0] { - break - } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUloadidx { - break - } - _ = x5.Args[2] - if ptr != x5.Args[0] { - break - } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUloadidx { - break - } - _ = x6.Args[2] - if ptr != x6.Args[0] { - break - } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - _ = x7.Args[2] - if ptr != x7.Args[0] { - break - } - x7_1 := x7.Args[1] - if x7_1.Op != OpARM64ADDconst || x7_1.AuxInt != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr idx mem)) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { - break - } - mem := x7.Args[2] - ptr := x7.Args[0] - x7_1 := x7.Args[1] - if x7_1.Op != OpARM64ADDconst || x7_1.AuxInt != 7 { - break - } - idx := x7_1.Args[0] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { - break - } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { - break - } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { - break - } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { - break - } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - _ = x0.Args[2] - if ptr != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - if ptr != x2.Args[0] { - break - } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { - 
break - } - _ = x3.Args[2] - if ptr != x3.Args[0] { - break - } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { - break - } - _ = x4.Args[2] - if ptr != x4.Args[0] { - break - } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUloadidx { - break - } - _ = x5.Args[2] - if ptr != x5.Args[0] { - break - } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUloadidx { - break - } - _ = x6.Args[2] - if ptr != x6.Args[0] { - break - } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + o0 := v.Args[_i0] + if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { + continue + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 { + continue + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 { + continue + } + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 { + continue + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 { + continue + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 { + continue + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 { + continue + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + continue + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + continue + } + mem := x0.Args[2] + ptr := x0.Args[0] + idx := x0.Args[1] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + continue + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + continue + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] { + continue + } + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + 
continue + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + continue + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] { + continue + } + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + continue + } + _ = x3.Args[2] + if ptr != x3.Args[0] { + continue + } + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] { + continue + } + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { + continue + } + _ = x4.Args[2] + if ptr != x4.Args[0] { + continue + } + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] { + continue + } + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + continue + } + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUloadidx { + continue + } + _ = x5.Args[2] + if ptr != x5.Args[0] { + continue + } + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] { + continue + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + continue + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUloadidx { + continue + } + _ = x6.Args[2] + if ptr != x6.Args[0] { + continue + } + x6_1 := x6.Args[1] + if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] { + continue + } + y7 := v.Args[1^_i0] + if y7.Op != OpARM64MOVDnop { + continue + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUloadidx { + continue + } + _ = x7.Args[2] + if ptr != x7.Args[0] { + continue + } + x7_1 := x7.Args[1] + if x7_1.Op != OpARM64ADDconst || x7_1.AuxInt != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + break } return false } @@ -24428,7 +19194,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { v.AddArg(y) return true } - // match: (ORshiftLL [c] (SRLconst x [64-c]) x) + // match: ( ORshiftLL [c] (SRLconst x [64-c]) x) // result: (RORconst [64-c] x) for { c := v.AuxInt @@ -24442,7 +19208,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftLL [c] (UBFX [bfc] x) x) + // match: ( ORshiftLL [c] (UBFX [bfc] x) x) // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (RORWconst [32-c] x) for { @@ -24477,7 +19243,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { v.AddArg(x) 
return true } - // match: (ORshiftLL [c] (SRLconst x [64-c]) x2) + // match: ( ORshiftLL [c] (SRLconst x [64-c]) x2) // result: (EXTRconst [64-c] x2 x) for { c := v.AuxInt @@ -24493,7 +19259,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftLL [c] (UBFX [bfc] x) x2) + // match: ( ORshiftLL [c] (UBFX [bfc] x) x2) // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (EXTRWconst [32-c] x2 x) for { @@ -24628,67 +19394,23 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + if mem != x1.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) + return true } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (MOVHUloadidx ptr0 idx0 mem) - for { - t := v.Type - if v.AuxInt != 8 { - break - } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true + break } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) @@ -24836,94 +19558,36 @@ func 
rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + if mem != x1.Args[1] { + continue + } + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s { + continue + } + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) + return true } - y2 := v.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr0 idx0 mem) - for { - t := v.Type - if v.AuxInt != 24 { - break - } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpARM64MOVHUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := o0.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := v.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && 
isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true + break } // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) @@ -25205,156 +19869,60 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + if mem != x1.Args[1] { + continue + } + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s { + continue + } + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { + continue + } + y3 := o0.Args[1] + if y3.Op != OpARM64MOVDnop { + continue + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] { + continue + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) + return true } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { - break - } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && 
isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { - b := v.Block - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - if v.AuxInt != 56 { - break - } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 { - break - } - _ = o2.Args[1] - x0 := o2.Args[0] - if x0.Op != OpARM64MOVWUloadidx { - break - } - mem := x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - y1 := o2.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 { - break - } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s { - break - } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { - break - } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, 
x4) - v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true + break } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx4 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) @@ -25559,6 +20127,10 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { + b := v.Block // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) // result: @mergePoint(b,x0,x1) (REV16W (MOVHUload [i0] {s} p mem)) @@ -25631,82 +20203,36 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + y1 := v.Args[1] + if y1.Op != OpARM64MOVDnop { + continue + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + continue + } + _ = x1.Args[2] + ptr0 := x1.Args[0] + idx0 := x1.Args[1] + if mem != x1.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - ptr0 := x1.Args[0] - idx0 := x1.Args[1] - if mem != x1.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && 
(isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (REV16W (MOVHUloadidx ptr0 idx0 mem)) - for { - t := v.Type - if v.AuxInt != 8 { - break - } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload || x0.AuxInt != 1 { - break - } - s := x0.Aux - mem := x0.Args[1] - p1 := x0.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - ptr0 := x1.Args[0] - idx0 := x1.Args[1] - if mem != x1.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) @@ -25865,108 +20391,40 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x1.Args[1] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + if mem != x1.Args[1] { + continue + } + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { + continue + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + continue + } + _ = x2.Args[2] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + if mem != x2.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - y2 := v.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - if mem != x2.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := 
b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUloadidx ptr0 idx0 mem)) - for { - t := v.Type - if v.AuxInt != 24 { - break - } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { - break - } - _ = o0.Args[1] - y0 := o0.Args[0] - if y0.Op != OpARM64REV16W { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVHUload || x0.AuxInt != 2 { - break - } - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o0.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s { - break - } - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x1.Args[1] { - break - } - y2 := v.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { - break - } - _ = x2.Args[2] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - if mem != x2.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueARM64_OpARM64ORshiftLL_30(v *Value) bool { - b := v.Block // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUloadidx ptr idx mem)) @@ -26221,137 +20679,39 @@ func rewriteValueARM64_OpARM64ORshiftLL_30(v *Value) bool { if p1.Op != OpARM64ADD { break } - idx1 := p1.Args[1] - ptr1 := p1.Args[0] - if mem != x3.Args[1] { - break + _ = p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr1 := p1.Args[_i0] + idx1 := p1.Args[1^_i0] + if mem != x3.Args[1] { + continue + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + continue + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { + continue + } + _ = x4.Args[2] + ptr0 := x4.Args[0] + idx0 := x4.Args[1] + if mem != x4.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 
1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x3.Pos, OpARM64REV, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { - break - } - _ = x4.Args[2] - ptr0 := x4.Args[0] - idx0 := x4.Args[1] - if mem != x4.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x3.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD idx1 ptr1) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDloadidx ptr0 idx0 mem)) - for { - t := v.Type - if v.AuxInt != 56 { - break - } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 { - break - } - _ = o2.Args[1] - y0 := o2.Args[0] - if y0.Op != OpARM64REVW { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVWUload || x0.AuxInt != 4 { - break - } - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - y1 := o2.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload || x1.AuxInt != 3 || x1.Aux != 
s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload || x3.AuxInt != 1 || x3.Aux != s { - break - } - _ = x3.Args[1] - p1 := x3.Args[0] - if p1.Op != OpARM64ADD { - break - } - ptr1 := p1.Args[1] - idx1 := p1.Args[0] - if mem != x3.Args[1] { - break - } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { - break - } - _ = x4.Args[2] - ptr0 := x4.Args[0] - idx0 := x4.Args[1] - if mem != x4.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x3.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) @@ -26579,7 +20939,7 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { v.AddArg(y) return true } - // match: (ORshiftRL [c] (SLLconst x [64-c]) x) + // match: ( ORshiftRL [c] (SLLconst x [64-c]) x) // result: (RORconst [ c] x) for { c := v.AuxInt @@ -26593,7 +20953,7 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) + // match: ( ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) // cond: c < 32 && t.Size() == 4 // result: (RORWconst [c] x) for { @@ -27789,153 +22149,91 @@ func rewriteValueARM64_OpARM64TST_0(v *Value) bool { // result: (TSTconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64TSTconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64TSTconst) - v.AuxInt = c - 
v.AddArg(x) - return true - } - // match: (TST (MOVDconst [c]) x) - // result: (TSTconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64TSTconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (TST x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (TSTshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SLLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64TSTshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (TST x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (TSTshiftLL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64TSTshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (TST x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (TSTshiftRL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64TSTshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (TST x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (TSTshiftRL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64TSTshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (TST x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (TSTshiftRA x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRAconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64TSTshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (TST x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (TSTshiftRA x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64TSTshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } return false } @@ -27944,30 +22242,19 @@ func rewriteValueARM64_OpARM64TSTW_0(v *Value) bool { // result: (TSTWconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break 
+ for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64TSTWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64TSTWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (TSTW (MOVDconst [c]) x) - // result: (TSTWconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64TSTWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -28551,34 +22838,25 @@ func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (XOR x (MOVDconst [c])) // result: (XORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVDconst [c]) x) - // result: (XORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR x x) // result: (MOVDconst [0]) @@ -28595,716 +22873,388 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // result: (EON x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MVN { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64MVN { + continue + } + y := v_1.Args[0] + v.reset(OpARM64EON) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpARM64EON) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (MVN y) x) - // result: (EON x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { - break - } - y := v_0.Args[0] - v.reset(OpARM64EON) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (XORshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SLLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64XORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (XOR x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (XORshiftLL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64XORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (XOR x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (XORshiftRL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op 
!= OpARM64SRLconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64XORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (XOR x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (XORshiftRL x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64XORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (XOR x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (XORshiftRA x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + x1 := v.Args[1^_i0] + if x1.Op != OpARM64SRAconst { + continue + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64XORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64XOR_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (XOR x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (XORshiftRA x0 y [c]) - for { - x0 := v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { - break - } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { - break - } - v.reset(OpARM64XORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true + break } // match: (XOR (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x (NEG y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || 
v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (XOR (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SLL x (ANDconst [63] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (ROR x (NEG y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt64 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SRL || v_0_0.Type != typ.UInt64 { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 64 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 63 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 64 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 63 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (XOR (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) 
(ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SRL x (ANDconst [63] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (ROR x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt64 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := 
v_0.Args[0] - if v_0_0.Op != OpARM64SLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 64 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 63 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 64 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 63 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SRL || v_1.Type != typ.UInt64 { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64ROR) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x (NEG y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpARM64SLL { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != 
typ.UInt32 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (XOR (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SLL x (ANDconst [31] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (RORW x (NEG y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt32 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SRL || v_0_0.Type != typ.UInt32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpARM64MOVWUreg { - break - } - x := v_0_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 32 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 31 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 32 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 31 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SLL { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (XOR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != 
OpARM64SRL || v_0.Type != typ.UInt32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64MOVWUreg { + continue + } + x := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + continue + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { + continue + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { + continue + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { + continue + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { + continue + } + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64MOVWUreg { - break - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - break - } - t := v_0_1.Type - if v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - break - } - cc := v_1.Aux - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - break - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SRL (MOVWUreg x) (ANDconst [31] y))) - // cond: cc.(Op) == OpARM64LessThanU - // result: (RORW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64CSEL0 || v_0.Type != typ.UInt32 { - break - } - cc := v_0.Aux - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64SLL { - break - } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpARM64SUB { - 
break - } - t := v_0_0_1.Type - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpARM64MOVDconst || v_0_0_1_0.AuxInt != 32 { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpARM64ANDconst || v_0_0_1_1.Type != t || v_0_0_1_1.AuxInt != 31 { - break - } - y := v_0_0_1_1.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64CMPconst || v_0_1.AuxInt != 64 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpARM64SUB || v_0_1_0.Type != t { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpARM64MOVDconst || v_0_1_0_0.AuxInt != 32 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpARM64ANDconst || v_0_1_0_1.Type != t || v_0_1_0_1.AuxInt != 31 || y != v_0_1_0_1.Args[0] { - break - } - v_1 := v.Args[1] - if v_1.Op != OpARM64SRL || v_1.Type != typ.UInt32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVWUreg || x != v_1_0.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64ANDconst || v_1_1.Type != t || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) { - break - } - v.reset(OpARM64RORW) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -35729,17 +29679,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64EQ) + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64EQ) - v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 @@ -35753,17 +29707,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64EQ) + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64EQ) - v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 @@ -35849,17 +29807,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64EQ) + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64EQ) - v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 @@ -35873,17 +29835,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64EQ) + v0 
:= b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64EQ) - v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 @@ -36215,17 +30181,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GE) + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GE) - v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 @@ -36239,17 +30209,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GE) + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GE) - v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 @@ -36335,17 +30309,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GE) + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GE) - v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 @@ -36359,17 +30337,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GE) + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GE) - v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 @@ -36631,17 +30613,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GT) + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GT) - v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - 
return true + break } // match: (GT (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 @@ -36655,17 +30641,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GT) + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GT) - v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 @@ -36751,17 +30741,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GT) + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GT) - v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 @@ -36775,17 +30769,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64GT) + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64GT) - v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 @@ -37157,17 +31155,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LE) + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LE) - v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 @@ -37181,17 +31183,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LE) + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LE) - v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 @@ -37277,17 +31283,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if 
!(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LE) + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LE) - v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 @@ -37301,17 +31311,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LE) + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LE) - v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 @@ -37547,17 +31561,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LT) + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LT) - v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 @@ -37571,17 +31589,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LT) + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LT) - v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 @@ -37667,17 +31689,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LT) + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LT) - v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 @@ -37691,17 +31717,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64LT) + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + 
v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64LT) - v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 @@ -37964,17 +31994,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64NE) + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64NE) - v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 @@ -37988,17 +32022,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64NE) + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64NE) - v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 @@ -38084,17 +32122,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64NE) + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64NE) - v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 @@ -38108,17 +32150,21 @@ func rewriteBlockARM64(b *Block) bool { if z.Op != OpARM64ADD { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockARM64NE) + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockARM64NE) - v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index d17be4422b..912c4a1082 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -464,9 +464,9 @@ func rewriteValueMIPS(v *Value) bool { case OpRsh8x8: return rewriteValueMIPS_OpRsh8x8_0(v) case OpSelect0: - return rewriteValueMIPS_OpSelect0_0(v) || rewriteValueMIPS_OpSelect0_10(v) + return rewriteValueMIPS_OpSelect0_0(v) case OpSelect1: - return rewriteValueMIPS_OpSelect1_0(v) || rewriteValueMIPS_OpSelect1_10(v) + return rewriteValueMIPS_OpSelect1_0(v) case 
OpSignExt16to32: return rewriteValueMIPS_OpSignExt16to32_0(v) case OpSignExt8to16: @@ -2650,59 +2650,37 @@ func rewriteValueMIPS_OpMIPSADD_0(v *Value) bool { // result: (ADDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpMIPSADDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpMIPSADDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (MOVWconst [c]) x) - // result: (ADDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpMIPSADDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD x (NEG y)) // result: (SUB x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSNEG { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSNEG { + continue + } + y := v_1.Args[0] + v.reset(OpMIPSSUB) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpMIPSSUB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (NEG y) x) - // result: (SUB x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSNEG { - break - } - y := v_0.Args[0] - v.reset(OpMIPSSUB) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -2787,30 +2765,19 @@ func rewriteValueMIPS_OpMIPSAND_0(v *Value) bool { // result: (ANDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpMIPSANDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpMIPSANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVWconst [c]) x) - // result: (ANDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpMIPSANDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (AND x x) // result: x @@ -2828,45 +2795,26 @@ func rewriteValueMIPS_OpMIPSAND_0(v *Value) bool { // result: (SGTUconst [1] (OR x y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSSGTUconst || v_0.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSSGTUconst || v_0.AuxInt != 1 { + continue + } + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSSGTUconst || v_1.AuxInt != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpMIPSSGTUconst) + v.AuxInt = 1 + v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSSGTUconst || v_1.AuxInt != 1 { - break - } - y := v_1.Args[0] - v.reset(OpMIPSSGTUconst) - v.AuxInt = 1 - v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (AND (SGTUconst [1] y) (SGTUconst [1] x)) - // result: (SGTUconst [1] (OR x y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSSGTUconst || v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSSGTUconst || v_1.AuxInt != 1 { - break - } - x := v_1.Args[0] - v.reset(OpMIPSSGTUconst) - v.AuxInt = 1 - v0 := 
b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } return false } @@ -4643,154 +4591,96 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { - // match: (MUL (MOVWconst [0]) _) + // match: (MUL (MOVWconst [0]) _ ) // result: (MOVWconst [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 0 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = 0 + return true } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true + break } - // match: (MUL _ (MOVWconst [0])) - // result: (MOVWconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (MUL (MOVWconst [1]) x) - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MUL x (MOVWconst [1])) + // match: (MUL (MOVWconst [1]) x ) // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } - // match: (MUL (MOVWconst [-1]) x) - // result: (NEG x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != -1 { - break - } - v.reset(OpMIPSNEG) - v.AddArg(x) - return true - } - // match: (MUL x (MOVWconst [-1])) + // match: (MUL (MOVWconst [-1]) x ) // result: (NEG x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpMIPSNEG) + v.AddArg(x) + return true } - v.reset(OpMIPSNEG) - v.AddArg(x) - return true + break } - // match: (MUL (MOVWconst [c]) x) - // cond: isPowerOfTwo(int64(uint32(c))) - // result: (SLLconst [log2(int64(uint32(c)))] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSLLconst) - v.AuxInt = log2(int64(uint32(c))) - v.AddArg(x) - return true - } - // match: (MUL x (MOVWconst [c])) + // match: (MUL (MOVWconst [c]) x ) // cond: isPowerOfTwo(int64(uint32(c))) // result: (SLLconst [log2(int64(uint32(c)))] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSMOVWconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if !(isPowerOfTwo(int64(uint32(c)))) { + continue + } + v.reset(OpMIPSSLLconst) + v.AuxInt = log2(int64(uint32(c))) + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSLLconst) - v.AuxInt = log2(int64(uint32(c))) - v.AddArg(x) - return true + break } // match: (MUL (MOVWconst [c]) (MOVWconst [d])) // result: (MOVWconst [int64(int32(c)*int32(d))]) for { 
_ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSMOVWconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSMOVWconst { + continue + } + d := v_1.AuxInt + v.reset(OpMIPSMOVWconst) + v.AuxInt = int64(int32(c) * int32(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - d := v_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = int64(int32(c) * int32(d)) - return true - } - // match: (MUL (MOVWconst [d]) (MOVWconst [c])) - // result: (MOVWconst [int64(int32(c)*int32(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = int64(int32(c) * int32(d)) - return true + break } return false } @@ -4814,30 +4704,19 @@ func rewriteValueMIPS_OpMIPSNOR_0(v *Value) bool { // result: (NORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpMIPSNORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpMIPSNORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (NOR (MOVWconst [c]) x) - // result: (NORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpMIPSNORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -4863,30 +4742,19 @@ func rewriteValueMIPS_OpMIPSOR_0(v *Value) bool { // result: (ORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpMIPSORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpMIPSORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVWconst [c]) x) - // result: (ORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpMIPSORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (OR x x) // result: x @@ -4904,43 +4772,25 @@ func rewriteValueMIPS_OpMIPSOR_0(v *Value) bool { // result: (SGTUzero (OR x y)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSSGTUzero { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMIPSSGTUzero { + continue + } + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSSGTUzero { + continue + } + y := v_1.Args[0] + v.reset(OpMIPSSGTUzero) + v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSSGTUzero { - break - } - y := v_1.Args[0] - v.reset(OpMIPSSGTUzero) - v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (OR (SGTUzero y) (SGTUzero x)) - // result: (SGTUzero (OR x y)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSSGTUzero { - break - } - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSSGTUzero { - break - } - x := v_1.Args[0] - v.reset(OpMIPSSGTUzero) - 
v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true + break } return false } @@ -5661,30 +5511,19 @@ func rewriteValueMIPS_OpMIPSXOR_0(v *Value) bool { // result: (XORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := v_1.AuxInt + v.reset(OpMIPSXORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpMIPSXORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVWconst [c]) x) - // result: (XORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpMIPSXORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR x x) // result: (MOVWconst [0]) @@ -7736,7 +7575,7 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { v.AddArg(y) return true } - // match: (Select0 (MULTU (MOVWconst [0]) _)) + // match: (Select0 (MULTU (MOVWconst [0]) _ )) // result: (MOVWconst [0]) for { v_0 := v.Args[0] @@ -7744,15 +7583,18 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = 0 + return true } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true + break } - // match: (Select0 (MULTU _ (MOVWconst [0]))) + // match: (Select0 (MULTU (MOVWconst [1]) _ )) // result: (MOVWconst [0]) for { v_0 := v.Args[0] @@ -7760,70 +7602,18 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst || v_0_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = 0 + return true } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true + break } - // match: (Select0 (MULTU (MOVWconst [1]) _)) - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (Select0 (MULTU _ (MOVWconst [1]))) - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst || v_0_1.AuxInt != 1 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (Select0 (MULTU (MOVWconst [-1]) x)) - // result: (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 { - break - } - v.reset(OpMIPSCMOVZ) - v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) - v0.AuxInt = -1 - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(x) - return true - } - // match: (Select0 (MULTU x (MOVWconst [-1]))) + // match: (Select0 (MULTU (MOVWconst [-1]) x )) // result: (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) for { v_0 := v.Args[0] @@ -7831,45 +7621,26 
@@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) + v0.AuxInt = -1 + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(x) + return true } - v.reset(OpMIPSCMOVZ) - v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) - v0.AuxInt = -1 - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(x) - return true + break } - // match: (Select0 (MULTU (MOVWconst [c]) x)) - // cond: isPowerOfTwo(int64(uint32(c))) - // result: (SRLconst [32-log2(int64(uint32(c)))] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - c := v_0_0.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSRLconst) - v.AuxInt = 32 - log2(int64(uint32(c))) - v.AddArg(x) - return true - } - // match: (Select0 (MULTU x (MOVWconst [c]))) + // match: (Select0 (MULTU (MOVWconst [c]) x )) // cond: isPowerOfTwo(int64(uint32(c))) // result: (SRLconst [32-log2(int64(uint32(c)))] x) for { @@ -7878,23 +7649,23 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(isPowerOfTwo(int64(uint32(c)))) { + continue + } + v.reset(OpMIPSSRLconst) + v.AuxInt = 32 - log2(int64(uint32(c))) + v.AddArg(x) + return true } - c := v_0_1.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSRLconst) - v.AuxInt = 32 - log2(int64(uint32(c))) - v.AddArg(x) - return true + break } - return false -} -func rewriteValueMIPS_OpSelect0_10(v *Value) bool { // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [(c*d)>>32]) for { @@ -7903,41 +7674,22 @@ func rewriteValueMIPS_OpSelect0_10(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := v_0_0.AuxInt + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPSMOVWconst { + continue + } + d := v_0_1.AuxInt + v.reset(OpMIPSMOVWconst) + v.AuxInt = (c * d) >> 32 + return true } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - d := v_0_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = (c * d) >> 32 - return true - } - // match: (Select0 (MULTU (MOVWconst [d]) (MOVWconst [c]))) - // result: (MOVWconst [(c*d)>>32]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - d := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - c := v_0_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = (c * d) >> 32 - return true + break } // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(c)%int32(d))]) @@ -8026,7 +7778,7 @@ func 
rewriteValueMIPS_OpSelect1_0(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULTU (MOVWconst [0]) _)) + // match: (Select1 (MULTU (MOVWconst [0]) _ )) // result: (MOVWconst [0]) for { v_0 := v.Args[0] @@ -8034,48 +7786,18 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = 0 + return true } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true + break } - // match: (Select1 (MULTU _ (MOVWconst [0]))) - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst || v_0_1.AuxInt != 0 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (Select1 (MULTU (MOVWconst [1]) x)) - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Select1 (MULTU x (MOVWconst [1]))) + // match: (Select1 (MULTU (MOVWconst [1]) x )) // result: x for { v_0 := v.Args[0] @@ -8083,34 +7805,20 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst || v_0_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } - // match: (Select1 (MULTU (MOVWconst [-1]) x)) - // result: (NEG x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 { - break - } - v.reset(OpMIPSNEG) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Select1 (MULTU x (MOVWconst [-1]))) + // match: (Select1 (MULTU (MOVWconst [-1]) x )) // result: (NEG x) for { v_0 := v.Args[0] @@ -8118,39 +7826,20 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpMIPSNEG) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpMIPSNEG) - v.Type = x.Type - v.AddArg(x) - return true + break } - // match: (Select1 (MULTU (MOVWconst [c]) x)) - // cond: isPowerOfTwo(int64(uint32(c))) - // result: (SLLconst [log2(int64(uint32(c)))] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - c := v_0_0.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSLLconst) - v.AuxInt = log2(int64(uint32(c))) - v.AddArg(x) - return true - } - // match: (Select1 (MULTU x (MOVWconst [c]))) + // match: (Select1 (MULTU (MOVWconst [c]) x )) // cond: isPowerOfTwo(int64(uint32(c))) // result: (SLLconst [log2(int64(uint32(c)))] x) for { @@ -8159,23 
+7848,23 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(isPowerOfTwo(int64(uint32(c)))) { + continue + } + v.reset(OpMIPSSLLconst) + v.AuxInt = log2(int64(uint32(c))) + v.AddArg(x) + return true } - c := v_0_1.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSLLconst) - v.AuxInt = log2(int64(uint32(c))) - v.AddArg(x) - return true + break } - return false -} -func rewriteValueMIPS_OpSelect1_10(v *Value) bool { // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))]) for { @@ -8184,41 +7873,22 @@ func rewriteValueMIPS_OpSelect1_10(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := v_0_0.AuxInt + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPSMOVWconst { + continue + } + d := v_0_1.AuxInt + v.reset(OpMIPSMOVWconst) + v.AuxInt = int64(int32(uint32(c) * uint32(d))) + return true } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - d := v_0_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = int64(int32(uint32(c) * uint32(d))) - return true - } - // match: (Select1 (MULTU (MOVWconst [d]) (MOVWconst [c]))) - // result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - d := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - c := v_0_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = int64(int32(uint32(c) * uint32(d))) - return true + break } // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(c)/int32(d))]) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index a62ac3cdb8..98d1e3bd25 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -546,7 +546,7 @@ func rewriteValueMIPS64(v *Value) bool { case OpSelect0: return rewriteValueMIPS64_OpSelect0_0(v) case OpSelect1: - return rewriteValueMIPS64_OpSelect1_0(v) || rewriteValueMIPS64_OpSelect1_10(v) + return rewriteValueMIPS64_OpSelect1_0(v) case OpSignExt16to32: return rewriteValueMIPS64_OpSignExt16to32_0(v) case OpSignExt16to64: @@ -3093,66 +3093,40 @@ func rewriteValueMIPS64_OpMIPS64ADDV_0(v *Value) bool { // result: (ADDVconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64ADDVconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64ADDVconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDV (MOVVconst [c]) x) - // cond: is32Bit(c) - // result: (ADDVconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - 
v.reset(OpMIPS64ADDVconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDV x (NEGV y)) // result: (SUBV x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64NEGV { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPS64NEGV { + continue + } + y := v_1.Args[0] + v.reset(OpMIPS64SUBV) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpMIPS64SUBV) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDV (NEGV y) x) - // result: (SUBV x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPS64NEGV { - break - } - y := v_0.Args[0] - v.reset(OpMIPS64SUBV) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -3245,37 +3219,22 @@ func rewriteValueMIPS64_OpMIPS64AND_0(v *Value) bool { // result: (ANDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64ANDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVVconst [c]) x) - // cond: is32Bit(c) - // result: (ANDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (AND x x) // result: x @@ -5182,37 +5141,22 @@ func rewriteValueMIPS64_OpMIPS64NOR_0(v *Value) bool { // result: (NORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64NORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64NORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (NOR (MOVVconst [c]) x) - // cond: is32Bit(c) - // result: (NORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64NORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -5238,37 +5182,22 @@ func rewriteValueMIPS64_OpMIPS64OR_0(v *Value) bool { // result: (ORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64ORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64ORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVVconst [c]) x) - // cond: is32Bit(c) - // result: (ORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64ORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (OR x x) // result: x @@ -5943,37 +5872,22 
@@ func rewriteValueMIPS64_OpMIPS64XOR_0(v *Value) bool { // result: (XORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64XORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVVconst [c]) x) - // cond: is32Bit(c) - // result: (XORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpMIPS64XORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR x x) // result: (MOVVconst [0]) @@ -8387,30 +8301,17 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 { + continue + } + v.reset(OpMIPS64NEGV) + v.AddArg(x) + return true } - v.reset(OpMIPS64NEGV) - v.AddArg(x) - return true - } - // match: (Select1 (MULVU (MOVVconst [-1]) x)) - // result: (NEGV x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst || v_0_0.AuxInt != -1 { - break - } - v.reset(OpMIPS64NEGV) - v.AddArg(x) - return true + break } // match: (Select1 (MULVU _ (MOVVconst [0]))) // result: (MOVVconst [0]) @@ -8420,29 +8321,16 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { + continue + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = 0 + return true } - v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 - return true - } - // match: (Select1 (MULVU (MOVVconst [0]) _)) - // result: (MOVVconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst || v_0_0.AuxInt != 0 { - break - } - v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 - return true + break } // match: (Select1 (MULVU x (MOVVconst [1]))) // result: x @@ -8452,32 +8340,18 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Select1 (MULVU (MOVVconst [1]) x)) - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst || v_0_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Select1 (MULVU x (MOVVconst [c]))) // cond: isPowerOfTwo(c) @@ -8488,41 +8362,22 @@ func 
rewriteValueMIPS64_OpSelect1_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPS64MOVVconst { + continue + } + c := v_0_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpMIPS64SLLVconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true } - c := v_0_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpMIPS64SLLVconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (Select1 (MULVU (MOVVconst [c]) x)) - // cond: isPowerOfTwo(c) - // result: (SLLVconst [log2(c)] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpMIPS64SLLVconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true + break } // match: (Select1 (DIVVU x (MOVVconst [1]))) // result: x @@ -8565,9 +8420,6 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValueMIPS64_OpSelect1_10(v *Value) bool { // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [c*d]) for { @@ -8576,41 +8428,22 @@ func rewriteValueMIPS64_OpSelect1_10(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpMIPS64MOVVconst { + continue + } + c := v_0_0.AuxInt + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpMIPS64MOVVconst { + continue + } + d := v_0_1.AuxInt + v.reset(OpMIPS64MOVVconst) + v.AuxInt = c * d + return true } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - d := v_0_1.AuxInt - v.reset(OpMIPS64MOVVconst) - v.AuxInt = c * d - return true - } - // match: (Select1 (MULVU (MOVVconst [d]) (MOVVconst [c]))) - // result: (MOVVconst [c*d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - d := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - c := v_0_1.AuxInt - v.reset(OpMIPS64MOVVconst) - v.AuxInt = c * d - return true + break } // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [c/d]) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index a8641ce1a4..bb6eb60efe 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -404,7 +404,7 @@ func rewriteValuePPC64(v *Value) bool { case OpPPC64ADDconst: return rewriteValuePPC64_OpPPC64ADDconst_0(v) case OpPPC64AND: - return rewriteValuePPC64_OpPPC64AND_0(v) || rewriteValuePPC64_OpPPC64AND_10(v) + return rewriteValuePPC64_OpPPC64AND_0(v) case OpPPC64ANDconst: return rewriteValuePPC64_OpPPC64ANDconst_0(v) || rewriteValuePPC64_OpPPC64ANDconst_10(v) case OpPPC64CMP: @@ -546,7 +546,7 @@ func rewriteValuePPC64(v *Value) bool { case OpPPC64NotEqual: return rewriteValuePPC64_OpPPC64NotEqual_0(v) case OpPPC64OR: - return rewriteValuePPC64_OpPPC64OR_0(v) || rewriteValuePPC64_OpPPC64OR_10(v) || rewriteValuePPC64_OpPPC64OR_20(v) || rewriteValuePPC64_OpPPC64OR_30(v) || rewriteValuePPC64_OpPPC64OR_40(v) || rewriteValuePPC64_OpPPC64OR_50(v) || rewriteValuePPC64_OpPPC64OR_60(v) || 
rewriteValuePPC64_OpPPC64OR_70(v) || rewriteValuePPC64_OpPPC64OR_80(v) || rewriteValuePPC64_OpPPC64OR_90(v) || rewriteValuePPC64_OpPPC64OR_100(v) || rewriteValuePPC64_OpPPC64OR_110(v) + return rewriteValuePPC64_OpPPC64OR_0(v) || rewriteValuePPC64_OpPPC64OR_10(v) || rewriteValuePPC64_OpPPC64OR_20(v) case OpPPC64ORN: return rewriteValuePPC64_OpPPC64ORN_0(v) case OpPPC64ORconst: @@ -558,7 +558,7 @@ func rewriteValuePPC64(v *Value) bool { case OpPPC64SUB: return rewriteValuePPC64_OpPPC64SUB_0(v) case OpPPC64XOR: - return rewriteValuePPC64_OpPPC64XOR_0(v) || rewriteValuePPC64_OpPPC64XOR_10(v) + return rewriteValuePPC64_OpPPC64XOR_0(v) case OpPPC64XORconst: return rewriteValuePPC64_OpPPC64XORconst_0(v) case OpPanicBounds: @@ -606,7 +606,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh32Ux32: return rewriteValuePPC64_OpRsh32Ux32_0(v) case OpRsh32Ux64: - return rewriteValuePPC64_OpRsh32Ux64_0(v) || rewriteValuePPC64_OpRsh32Ux64_10(v) + return rewriteValuePPC64_OpRsh32Ux64_0(v) case OpRsh32Ux8: return rewriteValuePPC64_OpRsh32Ux8_0(v) case OpRsh32x16: @@ -614,7 +614,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh32x32: return rewriteValuePPC64_OpRsh32x32_0(v) case OpRsh32x64: - return rewriteValuePPC64_OpRsh32x64_0(v) || rewriteValuePPC64_OpRsh32x64_10(v) + return rewriteValuePPC64_OpRsh32x64_0(v) case OpRsh32x8: return rewriteValuePPC64_OpRsh32x8_0(v) case OpRsh64Ux16: @@ -622,7 +622,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh64Ux32: return rewriteValuePPC64_OpRsh64Ux32_0(v) case OpRsh64Ux64: - return rewriteValuePPC64_OpRsh64Ux64_0(v) || rewriteValuePPC64_OpRsh64Ux64_10(v) + return rewriteValuePPC64_OpRsh64Ux64_0(v) case OpRsh64Ux8: return rewriteValuePPC64_OpRsh64Ux8_0(v) case OpRsh64x16: @@ -630,7 +630,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh64x32: return rewriteValuePPC64_OpRsh64x32_0(v) case OpRsh64x64: - return rewriteValuePPC64_OpRsh64x64_0(v) || rewriteValuePPC64_OpRsh64x64_10(v) + return rewriteValuePPC64_OpRsh64x64_0(v) case OpRsh64x8: return rewriteValuePPC64_OpRsh64x8_0(v) case OpRsh8Ux16: @@ -1832,41 +1832,25 @@ func rewriteValuePPC64_OpEq16_0(v *Value) bool { // cond: isSigned(x.Type) && isSigned(y.Type) // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + y := v.Args[1^_i0] + if !(isSigned(x.Type) && isSigned(y.Type)) { + continue + } + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64Equal) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - // match: (Eq16 y x) - // cond: isSigned(x.Type) && isSigned(y.Type) - // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y))) - for { - x := v.Args[1] - y := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break - } - v.reset(OpPPC64Equal) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - 
v0.AddArg(v2) - v.AddArg(v0) - return true + break } // match: (Eq16 x y) // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) @@ -1952,41 +1936,25 @@ func rewriteValuePPC64_OpEq8_0(v *Value) bool { // cond: isSigned(x.Type) && isSigned(y.Type) // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + y := v.Args[1^_i0] + if !(isSigned(x.Type) && isSigned(y.Type)) { + continue + } + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64Equal) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - // match: (Eq8 y x) - // cond: isSigned(x.Type) && isSigned(y.Type) - // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y))) - for { - x := v.Args[1] - y := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break - } - v.reset(OpPPC64Equal) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } // match: (Eq8 x y) // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) @@ -3480,40 +3448,21 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1.Args[_i0] + v_1_1 := v_1.Args[1^_i0] + if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { + continue + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true } - v.reset(OpPPC64SLW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = 31 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (Lsh32x64 x (AND (MOVDconst [31]) y)) - // result: (SLW x (ANDconst [31] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64AND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 31 { - break - } - v.reset(OpPPC64SLW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = 31 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (Lsh32x64 x (ANDconst [31] y)) // result: (SLW x (ANDconst [31] y)) @@ -3786,40 +3735,21 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1.Args[_i0] + v_1_1 := v_1.Args[1^_i0] + if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { + continue + } + v.reset(OpPPC64SLD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - v.reset(OpPPC64SLD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) 
- v0.AuxInt = 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (Lsh64x64 x (AND (MOVDconst [63]) y)) - // result: (SLD x (ANDconst [63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64AND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 63 { - break - } - v.reset(OpPPC64SLD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (Lsh64x64 x (ANDconst [63] y)) // result: (SLD x (ANDconst [63] y)) @@ -4701,41 +4631,25 @@ func rewriteValuePPC64_OpNeq16_0(v *Value) bool { // cond: isSigned(x.Type) && isSigned(y.Type) // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + y := v.Args[1^_i0] + if !(isSigned(x.Type) && isSigned(y.Type)) { + continue + } + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64NotEqual) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - // match: (Neq16 y x) - // cond: isSigned(x.Type) && isSigned(y.Type) - // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) - for { - x := v.Args[1] - y := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break - } - v.reset(OpPPC64NotEqual) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } // match: (Neq16 x y) // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) @@ -4821,41 +4735,25 @@ func rewriteValuePPC64_OpNeq8_0(v *Value) bool { // cond: isSigned(x.Type) && isSigned(y.Type) // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + y := v.Args[1^_i0] + if !(isSigned(x.Type) && isSigned(y.Type)) { + continue + } + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64NotEqual) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - // match: (Neq8 y x) - // cond: isSigned(x.Type) && isSigned(y.Type) - // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) - for { - x := v.Args[1] - y := v.Args[0] - if !(isSigned(x.Type) && isSigned(y.Type)) { - break - } - v.reset(OpPPC64NotEqual) - v0 := b.NewValue0(v.Pos, OpPPC64CMPW, 
types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } // match: (Neq8 x y) // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) @@ -5008,297 +4906,164 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { // result: (ROTLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLDconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRDconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpPPC64ROTLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (ROTLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD (SLWconst x [c]) (SRWconst x [d])) // cond: d == 32-c // result: (ROTLWconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLWconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpPPC64ROTLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (ROTLWconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLWconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) // result: (ROTL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLD { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRD { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst || 
v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] { + continue + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRD { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) (SLD x (ANDconst [63] y))) - // result: (ROTL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRD { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64SUB || v_0_1.Type != typ.UInt { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64MOVDconst || v_0_1_0.AuxInt != 64 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpPPC64ANDconst || v_0_1_1.Type != typ.UInt || v_0_1_1.AuxInt != 63 { - break - } - y := v_0_1_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLD { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.Int64 || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRW { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] { + continue + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt 
!= 31 || y != v_1_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) (SLW x (ANDconst [31] y))) - // result: (ROTLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRW { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64SUB || v_0_1.Type != typ.UInt { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64MOVDconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpPPC64ANDconst || v_0_1_1.Type != typ.UInt || v_0_1_1.AuxInt != 31 { - break - } - y := v_0_1_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.Int32 || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) // result: (ADDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpPPC64ADDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpPPC64ADDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (MOVDconst [c]) x) - // cond: is32Bit(c) - // result: (ADDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpPPC64ADDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -5358,225 +5123,129 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { // result: (ANDN x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64NOR { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64NOR { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpPPC64ANDN) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[1] - if y != v_1.Args[0] { - break - } - v.reset(OpPPC64ANDN) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (NOR y y) x) - // result: (ANDN x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64NOR { - break - } - y := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpPPC64ANDN) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (AND (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c&d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c & d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c & d - return true - } - // match: (AND (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c&d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := 
v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c & d - return true + break } // match: (AND x (MOVDconst [c])) // cond: isU16Bit(c) // result: (ANDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isU16Bit(c)) { + continue + } + v.reset(OpPPC64ANDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isU16Bit(c)) { - break - } - v.reset(OpPPC64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [c]) x) - // cond: isU16Bit(c) - // result: (ANDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - c := v_0.AuxInt - if !(isU16Bit(c)) { - break - } - v.reset(OpPPC64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (AND (MOVDconst [c]) y:(MOVWZreg _)) // cond: c&0xFFFFFFFF == 0xFFFFFFFF // result: y for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + y := v.Args[1^_i0] + if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) { + continue + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - c := v_0.AuxInt - y := v.Args[1] - if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (AND y:(MOVWZreg _) (MOVDconst [c])) - // cond: c&0xFFFFFFFF == 0xFFFFFFFF - // result: y - for { - _ = v.Args[1] - y := v.Args[0] - if y.Op != OpPPC64MOVWZreg { - break - } - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - if !(c&0xFFFFFFFF == 0xFFFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) // result: (MOVWZreg x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 0xFFFFFFFF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 0xFFFFFFFF { + continue + } + y := v.Args[1^_i0] + if y.Op != OpPPC64MOVWreg { + continue + } + x := y.Args[0] + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true } - y := v.Args[1] - if y.Op != OpPPC64MOVWreg { - break - } - x := y.Args[0] - v.reset(OpPPC64MOVWZreg) - v.AddArg(x) - return true + break } - // match: (AND y:(MOVWreg x) (MOVDconst [0xFFFFFFFF])) - // result: (MOVWZreg x) - for { - _ = v.Args[1] - y := v.Args[0] - if y.Op != OpPPC64MOVWreg { - break - } - x := y.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0xFFFFFFFF { - break - } - v.reset(OpPPC64MOVWZreg) - v.AddArg(x) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64AND_10(v *Value) bool { // match: (AND (MOVDconst [c]) x:(MOVBZload _ _)) // result: (ANDconst [c&0xFF] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + x := v.Args[1^_i0] + if x.Op != OpPPC64MOVBZload { + continue + } + _ = x.Args[1] + v.reset(OpPPC64ANDconst) + v.AuxInt = c & 0xFF + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v.Args[1] - if x.Op 
!= OpPPC64MOVBZload { - break - } - _ = x.Args[1] - v.reset(OpPPC64ANDconst) - v.AuxInt = c & 0xFF - v.AddArg(x) - return true - } - // match: (AND x:(MOVBZload _ _) (MOVDconst [c])) - // result: (ANDconst [c&0xFF] x) - for { - _ = v.Args[1] - x := v.Args[0] - if x.Op != OpPPC64MOVBZload { - break - } - _ = x.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64ANDconst) - v.AuxInt = c & 0xFF - v.AddArg(x) - return true + break } return false } @@ -6278,72 +5947,46 @@ func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool { func rewriteValuePPC64_OpPPC64FADD_0(v *Value) bool { // match: (FADD (FMUL x y) z) // result: (FMADD x y z) - for { - z := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64FMUL { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpPPC64FMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (FADD z (FMUL x y)) - // result: (FMADD x y z) for { _ = v.Args[1] - z := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FMUL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64FMUL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + z := v.Args[1^_i0] + v.reset(OpPPC64FMADD) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpPPC64FMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } func rewriteValuePPC64_OpPPC64FADDS_0(v *Value) bool { // match: (FADDS (FMULS x y) z) // result: (FMADDS x y z) - for { - z := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64FMULS { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpPPC64FMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (FADDS z (FMULS x y)) - // result: (FMADDS x y z) for { _ = v.Args[1] - z := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FMULS { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64FMULS { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + z := v.Args[1^_i0] + v.reset(OpPPC64FMADDS) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - y := v_1.Args[1] - x := v_1.Args[0] - v.reset(OpPPC64FMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } @@ -11268,40 +10911,21 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { break } _ = y.Args[1] - y_0 := y.Args[0] - if y_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y_0 := y.Args[_i0] + if y_0.Op != OpPPC64MOVDconst { + continue + } + c := y_0.AuxInt + if !(uint64(c) <= 0xFFFFFFFF) { + continue + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - c := y_0.AuxInt - if !(uint64(c) <= 0xFFFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVWZreg y:(AND _ (MOVDconst [c]))) - // cond: uint64(c) <= 0xFFFFFFFF - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64AND { - break - } - _ = y.Args[1] - y_1 := y.Args[1] - if y_1.Op != OpPPC64MOVDconst { - break - } - c := y_1.AuxInt - if !(uint64(c) <= 0xFFFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (MOVWZreg (SRWconst [c] (MOVBZreg x))) // result: (SRWconst [c] (MOVBZreg x)) @@ -11423,9 +11047,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { v.AddArg(y) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg y:(MOVBZreg _)) // result: y for { @@ -11438,6 
+11059,9 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { v.AddArg(y) return true } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg y:(MOVHBRload _ _)) // result: y for { @@ -11554,9 +11178,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool { // match: (MOVWZreg x:(Arg )) // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) // result: x @@ -11574,6 +11195,9 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool { // match: (MOVWZreg (MOVDconst [c])) // result: (MOVDconst [int64(uint32(c))]) for { @@ -11734,40 +11358,21 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { break } _ = y.Args[1] - y_0 := y.Args[0] - if y_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y_0 := y.Args[_i0] + if y_0.Op != OpPPC64MOVDconst { + continue + } + c := y_0.AuxInt + if !(uint64(c) <= 0x7FFFFFFF) { + continue + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - c := y_0.AuxInt - if !(uint64(c) <= 0x7FFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVWreg y:(AND _ (MOVDconst [c]))) - // cond: uint64(c) <= 0x7FFFFFFF - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64AND { - break - } - _ = y.Args[1] - y_1 := y.Args[1] - if y_1.Op != OpPPC64MOVDconst { - break - } - c := y_1.AuxInt - if !(uint64(c) <= 0x7FFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (MOVWreg (SRAWconst [c] (MOVBreg x))) // result: (SRAWconst [c] (MOVBreg x)) @@ -11895,9 +11500,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { v.AddArg(y) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg y:(MOVHreg _)) // result: y for { @@ -11910,6 +11512,9 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { v.AddArg(y) return true } + return false +} +func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg y:(MOVBreg _)) // result: y for { @@ -12423,345 +12028,192 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { } func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { b := v.Block + config := b.Func.Config typ := &b.Func.Config.Types - // match: (OR (SLDconst x [c]) (SRDconst x [d])) + // match: ( OR (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (ROTLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLDconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRDconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpPPC64ROTLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (OR (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (ROTLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRDconst { - break - } - d := v_0.AuxInt - x := 
v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (SLWconst x [c]) (SRWconst x [d])) + // match: ( OR (SLWconst x [c]) (SRWconst x [d])) // cond: d == 32-c // result: (ROTLWconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLWconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpPPC64ROTLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (OR (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (ROTLWconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLWconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) // result: (ROTL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLD { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRD { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] { + continue + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRD { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) - return true + break } - // match: (OR (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) (SLD x (ANDconst [63] y))) - // result: (ROTL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRD { - break - } 
- _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64SUB || v_0_1.Type != typ.UInt { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64MOVDconst || v_0_1_0.AuxInt != 64 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpPPC64ANDconst || v_0_1_1.Type != typ.UInt || v_0_1_1.AuxInt != 63 { - break - } - y := v_0_1_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLD { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.Int64 || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRW { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] { + continue + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) (SLW x (ANDconst [31] y))) - // result: (ROTLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRW { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64SUB || v_0_1.Type != typ.UInt { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64MOVDconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpPPC64ANDconst || v_0_1_1.Type != typ.UInt || v_0_1_1.AuxInt != 31 { - break - } - y := v_0_1_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.Int32 || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (OR (MOVDconst [c]) (MOVDconst [d])) // 
result: (MOVDconst [c|d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c | d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c | d - return true + break } - // match: (OR (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c|d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c | d - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types // match: (OR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (ORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isU32Bit(c)) { + continue + } + v.reset(OpPPC64ORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpPPC64ORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVDconst [c]) x) - // cond: isU32Bit(c) - // result: (ORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - c := v_0.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpPPC64ORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (OR x0:(MOVBZload [i0] {s} p mem) o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8])) // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) @@ -12769,79 +12221,42 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { for { t := v.Type _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + o1 := v.Args[1^_i0] + if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { + continue + } + x1 := o1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o1 := v.Args[1] - if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, 
x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) - // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) - // result: @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o1 := v.Args[0] - if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR x0:(MOVBZload [i0] {s} p mem) o1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) @@ -12849,79 +12264,42 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { for { t := v.Type _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + o1 := v.Args[1^_i0] + if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { + continue + } + x1 := o1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o1 := v.Args[1] - if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) - // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) 
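The MOVHZload rules here fuse two adjacent zero-extending byte loads, one of them shifted left by 8, into a single halfword load; the !config.BigEndian && i1 == i0+1 condition (together with the Uses == 1 and clobber checks that make sure the intermediate values are dead) is what makes that safe. The arithmetic behind it can be checked independently of the compiler:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		b := []byte{0x11, 0x22, 0x33, 0x44}
		i0 := 1
		// OR of two adjacent zero-extended byte loads, the higher-addressed
		// one shifted into bits 8..15 (the SLWconst/SLDconst [8] operand):
		merged := uint16(b[i0]) | uint16(b[i0+1])<<8
		// On a little-endian target this is the halfword stored at i0, the
		// value a single zero-extending halfword load returns.
		fmt.Println(merged == binary.LittleEndian.Uint16(b[i0:]))
	}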
&& clobber(o1) - // result: @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o1 := v.Args[0] - if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR x0:(MOVBZload [i1] {s} p mem) o1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [8])) // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) @@ -12929,83 +12307,44 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { for { t := v.Type _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i1 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + o1 := v.Args[1^_i0] + if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { + continue + } + x1 := o1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i0 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true } - i1 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o1 := v.Args[1] - if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [8]) x0:(MOVBZload [i1] {s} p mem)) - // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) - // result: @mergePoint(b,x0,x1) (MOVHBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o1 := v.Args[0] - if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if 
x0.Op != OpPPC64MOVBZload { - break - } - i1 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } // match: (OR x0:(MOVBZload [i1] {s} p mem) o1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [8])) // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) @@ -13013,87 +12352,48 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { for { t := v.Type _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i1 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + o1 := v.Args[1^_i0] + if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { + continue + } + x1 := o1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i0 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true } - i1 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o1 := v.Args[1] - if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [8]) x0:(MOVBZload [i1] {s} p mem)) - // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) - // result: @mergePoint(b,x0,x1) (MOVHBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o1 := v.Args[0] - if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { - break - } - x1 := o1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i1 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && 
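The MOVHBRload variants cover the opposite byte order: the byte at the lower address i0 is the one under the SLWconst/SLDconst [8], so it ends up in the high half of the result, which is the big-endian halfword at i0, the value a byte-reversed halfword load produces on a little-endian target (hence the MOVDaddr [i0] operand in the result). Again a small standalone check of the arithmetic:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		b := []byte{0x11, 0x22, 0x33, 0x44}
		i0 := 1
		// Here the lower-addressed byte is the shifted one, so it lands in
		// the high half of the result:
		merged := uint16(b[i0])<<8 | uint16(b[i0+1])
		// That is the big-endian halfword at i0, i.e. what a byte-reversed
		// halfword load yields on a little-endian machine.
		fmt.Println(merged == binary.BigEndian.Uint16(b[i0:]))
	}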
clobber(x0) && clobber(x1) && clobber(o1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } return false } -func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { +func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -13103,101 +12403,53 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpPPC64SLWconst { + continue + } + n1 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i1 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + s1 := v.Args[1^_i0] + if s1.Op != OpPPC64SLWconst { + continue + } + n2 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i0 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = n1 + v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) + v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - n1 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i1 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpPPC64SLWconst { - break - } - n2 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = n1 - v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR s1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [n2]) s0:(SLWconst x0:(MOVBZload [i1] {s} p mem) [n1])) - // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) - // result: @mergePoint(b,x0,x1) (SLDconst (MOVHBRload (MOVDaddr [i0] {s} p) mem) [n1]) - for { - t := v.Type - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLWconst { - break - } - n2 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpPPC64SLWconst { - break - } - n1 := s0.AuxInt - x0 := s0.Args[0] 
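With the mirrored rules gone, the OR rules also fit in fewer chunk functions, which is why rewriteValuePPC64_OpPPC64OR_20 is renamed to ..._10 here and the old ..._30 and ..._40 chunks disappear further down. The [n1]/[n2] rules that follow generalize the byte-reversed merge to an arbitrary byte position: two adjacent bytes shifted by n1 and n1+8 collapse to one byte-reversed halfword shifted once by n1. The algebra, checked in isolation:

	package main

	import "fmt"

	func main() {
		// Bytes at offsets i0 and i1 = i0+1, as loaded by the two MOVBZloads.
		b0, b1 := uint64(0xAB), uint64(0xCD)
		n1 := uint(16) // any multiple of 8; the rule requires n1%8 == 0 and n2 == n1+8
		// Left-hand side of the rule: each byte shifted individually, then OR'ed.
		lhs := b1<<n1 | b0<<(n1+8)
		// Right-hand side: one byte-reversed halfword of the pair, shifted
		// once by n1 (SLDconst [n1] of the merged load).
		rhs := (b0<<8 | b1) << n1
		fmt.Println(lhs == rhs) // true
	}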
- if x0.Op != OpPPC64MOVBZload { - break - } - i1 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = n1 - v1 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR s0:(SLDconst x0:(MOVBZload [i1] {s} p mem) [n1]) s1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [n2])) // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) @@ -13205,101 +12457,53 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpPPC64SLDconst { + continue + } + n1 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i1 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + s1 := v.Args[1^_i0] + if s1.Op != OpPPC64SLDconst { + continue + } + n2 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i0 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = n1 + v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) + v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - n1 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i1 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpPPC64SLDconst { - break - } - n2 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = n1 - v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR s1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [n2]) s0:(SLDconst x0:(MOVBZload [i1] {s} p mem) [n1])) - // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) - // result: @mergePoint(b,x0,x1) (SLDconst (MOVHBRload (MOVDaddr [i0] {s} p) mem) [n1]) - for { - t := v.Type - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLDconst { - break - } - n2 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i0 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst { - break - } - n1 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i1 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = n1 - v1 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem))) // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) @@ -13307,243 +12511,65 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { + continue + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i3 := x2.AuxInt + s := x2.Aux + mem := x2.Args[1] + p := x2.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := o0.Args[_i1] + if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { + continue + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + x0 := o0.Args[1^_i1] + if x0.Op != OpPPC64MOVHZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - 
break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR x0:(MOVHZload [i0] {s} p mem) s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]))) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)) s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24])) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - 
break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR x0:(MOVHZload [i0] {s} p mem) s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16])) s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24])) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR s1:(SLDconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem))) // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) @@ -13551,249 +12577,65 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { + continue + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i3 := x2.AuxInt + s := x2.Aux + mem := x2.Args[1] + p := x2.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + 
for _i1 := 0; _i1 <= 1; _i1++ { + s0 := o0.Args[_i1] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { + continue + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + x0 := o0.Args[1^_i1] + if x0.Op != OpPPC64MOVHZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s1:(SLDconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR x0:(MOVHZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [16]))) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && 
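For the patterns whose second operand is itself an OR (the o0:(OR ...) matches in this hunk), the generated matcher nests a second loop over _i1 for o0's operands; a continue in the inner loop retries only the inner order, and only after both loops are exhausted does control reach the trailing break and the next rule. A toy sketch of the nested shape, with hypothetical ops A, B and C standing in for the real shifted-load operands:

	package main

	import "fmt"

	type Value struct {
		Op   string
		Args []*Value
	}

	func matchNested(v *Value) bool {
		if v.Op != "OR" || len(v.Args) != 2 {
			return false
		}
		for _i0 := 0; _i0 <= 1; _i0++ {
			a := v.Args[_i0]
			if a.Op != "A" {
				continue // wrong outer order; try the swapped one
			}
			o0 := v.Args[1^_i0]
			if o0.Op != "OR" || len(o0.Args) != 2 {
				continue
			}
			for _i1 := 0; _i1 <= 1; _i1++ {
				if o0.Args[_i1].Op != "B" {
					continue // retries only the inner operand order
				}
				if o0.Args[1^_i1].Op != "C" {
					continue
				}
				return true
			}
		}
		// Both orders of both ORs failed; the generated code "break"s here
		// and falls through to the next rule.
		return false
	}

	func main() {
		v := &Value{Op: "OR", Args: []*Value{
			{Op: "OR", Args: []*Value{{Op: "C"}, {Op: "B"}}},
			{Op: "A"},
		}}
		fmt.Println(matchNested(v)) // true
	}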
mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR o0:(OR s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i3] {s} p mem) [24])) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR x0:(MOVHZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [16])) s1:(SLDconst x2:(MOVBZload [i3] {s} p mem) [24])) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 
== i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR s1:(SLWconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR s0:(SLWconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem))) // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) @@ -13801,258 +12643,68 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { for { t := v.Type _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { + continue + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i0 := x2.AuxInt + s := x2.Aux + mem := x2.Args[1] + p := x2.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := o0.Args[_i1] + if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { + continue + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + x0 := o0.Args[1^_i1] + if x0.Op != OpPPC64MOVHBRload || x0.Type != t { + continue + } + _ = x0.Args[1] + x0_0 := x0.Args[0] + if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { + continue + } + i2 := x0_0.AuxInt + if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s1:(SLWconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) s0:(SLWconst x1:(MOVBZload [i1] {s} p mem) [16]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s0:(SLWconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem)) s1:(SLWconst x2:(MOVBZload [i0] {s} p mem) [24])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break - } - x2 
:= s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) s0:(SLWconst x1:(MOVBZload [i1] {s} p mem) [16])) s1:(SLWconst x2:(MOVBZload [i0] {s} p mem) [24])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - mem := x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - s := x0_0.Aux - p := x0_0.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } // match: (OR s1:(SLDconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem))) // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) @@ -14060,523 +12712,137 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { for { t := v.Type _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { + continue + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i0 := x2.AuxInt + s := 
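The word-sized merges in the surrounding hunks follow the same two patterns as the halfword ones: a halfword load plus two shifted bytes becomes a MOVWZload when the bytes line up in native (little-endian) order, and becomes a MOVWBRload through a MOVDaddr of the lowest offset when they line up in reversed order; the later variants that pair a byte-reversed halfword with two single bytes work the same way. Both identities, checked with encoding/binary outside the compiler:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		b := []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
		i0 := 1

		// Native order: a halfword load at i0 plus the bytes at i0+2 and
		// i0+3 shifted by 16 and 24 is the word at i0 (MOVWZload shape).
		le := uint32(binary.LittleEndian.Uint16(b[i0:])) |
			uint32(b[i0+2])<<16 | uint32(b[i0+3])<<24
		fmt.Println(le == binary.LittleEndian.Uint32(b[i0:]))

		// Reversed order: the bytes at i0 and i0+1 in the top two byte
		// positions plus a byte-reversed halfword at i0+2 is the big-endian
		// word at i0 (MOVWBRload shape).
		be := uint32(b[i0])<<24 | uint32(b[i0+1])<<16 |
			uint32(binary.BigEndian.Uint16(b[i0+2:]))
		fmt.Println(be == binary.BigEndian.Uint32(b[i0:]))
	}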
x2.Aux + mem := x2.Args[1] + p := x2.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := o0.Args[_i1] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { + continue + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + x0 := o0.Args[1^_i1] + if x0.Op != OpPPC64MOVHBRload || x0.Type != t { + continue + } + _ = x0.Args[1] + x0_0 := x0.Args[0] + if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { + continue + } + i2 := x0_0.AuxInt + if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } - // match: (OR s1:(SLDconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [16]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type 
!= t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem)) s1:(SLDconst x2:(MOVBZload [i0] {s} p mem) [24])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := o0.Args[1] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [16])) s1:(SLDconst x2:(MOVBZload [i0] {s} p mem) [24])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = 
v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - mem := x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - s := x0_0.Aux - p := x0_0.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types // match: (OR x0:(MOVBZload [i3] {s} p mem) o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLWconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16]))) // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i3 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := o0.Args[_i1] + if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 { + continue + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + s1 := o0.Args[1^_i1] + if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 { + continue + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVHBRload || x2.Type != t { + continue + } + _ = x2.Args[1] + x2_0 := x2.Args[0] + if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { + continue + } + i0 := x2_0.AuxInt + if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := 
b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } } - i3 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - _ = x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x0:(MOVBZload [i3] {s} p mem) o0:(OR s1:(SLWconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16]) s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [8]))) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i3 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - _ = x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLWconst x2:(MOVHBRload (MOVDaddr 
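
The surviving `+` hunks above all share one shape: rather than pinning each commutative operand to Args[0], the generated matcher iterates both argument orders with `for _i0 := 0; _i0 <= 1; _i0++`, takes the candidate from v.Args[_i0] and its sibling from v.Args[1^_i0], and uses `continue` so a failed sub-check simply moves on to the other ordering. The toy program below is a minimal, self-contained sketch of that idiom; the `node` type and the `matchOrConst` helper are made up for illustration and are not compiler code.

package main

import "fmt"

// node is a hypothetical stand-in for an SSA value: an opcode, an aux
// constant, and its arguments. It exists only to show the matcher shape.
type node struct {
	op   string
	aux  int64
	args []*node
}

// matchOrConst looks for (OR (CONST [c]) x) while treating OR as
// commutative, using the same loop-and-continue idiom as the generated
// rules: try args[_i0] as the CONST and args[1^_i0] as the other
// operand, and continue to the next ordering when a check fails.
func matchOrConst(v *node) (c int64, x *node, ok bool) {
	if v.op != "OR" {
		return 0, nil, false
	}
	for _i0 := 0; _i0 <= 1; _i0++ {
		cand := v.args[_i0]
		if cand.op != "CONST" {
			continue // wrong operand in this order; try the other one
		}
		return cand.aux, v.args[1^_i0], true
	}
	return 0, nil, false
}

func main() {
	x := &node{op: "LOAD"}
	k := &node{op: "CONST", aux: 42}

	// The constant matches whether it is the first or the second operand.
	for _, v := range []*node{
		{op: "OR", args: []*node{k, x}},
		{op: "OR", args: []*node{x, k}},
	} {
		c, other, ok := matchOrConst(v)
		fmt.Println(ok, c, other.op) // true 42 LOAD, both times
	}
}

The individual checks in the `+` hunks are unchanged from the old code; the visible difference is that `break` becomes `continue` inside the _i loops, and the old hand-written mirror-image variants become redundant.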
[i0] {s} p) mem) [16])) x0:(MOVBZload [i3] {s} p mem)) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s1 := o0.Args[1] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - _ = x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] { - break - } - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i3 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s1:(SLWconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16]) s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [8])) x0:(MOVBZload [i3] {s} p mem)) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - mem := x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - s := x2_0.Aux - p := x2_0.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i3 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - 
} - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } // match: (OR x0:(MOVBZload [i3] {s} p mem) o0:(OR s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLDconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16]))) // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) @@ -14584,258 +12850,68 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { for { t := v.Type _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i3 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := o0.Args[_i1] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 { + continue + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + s1 := o0.Args[1^_i1] + if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 { + continue + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVHBRload || x2.Type != t { + continue + } + _ = x2.Args[1] + x2_0 := x2.Args[0] + if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { + continue + } + i0 := x2_0.AuxInt + if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } } - i3 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - _ = x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - 
v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x0:(MOVBZload [i3] {s} p mem) o0:(OR s1:(SLDconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16]) s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [8]))) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i3 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - _ = x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLDconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16])) x0:(MOVBZload [i3] {s} p mem)) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - _ = x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] { - break - } - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i3 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || 
!(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s1:(SLDconst x2:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [16]) s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [8])) x0:(MOVBZload [i3] {s} p mem)) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpPPC64MOVHBRload || x2.Type != t { - break - } - mem := x2.Args[1] - x2_0 := x2.Args[0] - if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr { - break - } - i0 := x2_0.AuxInt - s := x2_0.Aux - p := x2_0.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - x0 := v.Args[1] - if x0.Op != OpPPC64MOVBZload { - break - } - i3 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + break } // match: (OR s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32]) o0:(OR s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40]) s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [48]))) // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) @@ -14843,292 +12919,75 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { for { t := v.Type _ = v.Args[1] - s2 := v.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s2 := v.Args[_i0] + if s2.Op != OpPPC64SLDconst || s2.AuxInt != 32 { + continue + } + x2 := s2.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i3 := x2.AuxInt + s := x2.Aux + mem := x2.Args[1] + p := x2.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ 
= o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := o0.Args[_i1] + if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 { + continue + } + x1 := s1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i2 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + s0 := o0.Args[1^_i1] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 { + continue + } + x0 := s0.Args[0] + if x0.Op != OpPPC64MOVHBRload || x0.Type != t { + continue + } + _ = x0.Args[1] + x0_0 := x0.Args[0] + if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { + continue + } + i0 := x0_0.AuxInt + if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = 32 + v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i0 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32]) o0:(OR s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [48]) s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40]))) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) - for { - t := v.Type - _ = v.Args[1] - s2 := v.Args[0] - if s2.Op != 
OpPPC64SLDconst || s2.AuxInt != 32 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i0 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR o0:(OR s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40]) s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [48])) s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i0 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s2 := v.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 32 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && 
clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR o0:(OR s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i0] {s} p) mem) [48]) s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])) s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])) - // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - mem := x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i0 := x0_0.AuxInt - s := x0_0.Aux - p := x0_0.Args[0] - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s2 := v.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 32 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56]) o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) [32]))) // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) @@ -15136,1945 +12995,193 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { for { t := v.Type _ = v.Args[1] - s2 := v.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s2 := v.Args[_i0] + if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 { + continue + } + x2 := s2.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i0 := x2.AuxInt + s := x2.Aux + mem := x2.Args[1] + p := 
x2.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := o0.Args[_i1] + if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { + continue + } + x1 := s1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + s0 := o0.Args[1^_i1] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { + continue + } + x0 := s0.Args[0] + if x0.Op != OpPPC64MOVHBRload || x0.Type != t { + continue + } + _ = x0.Args[1] + x0_0 := x0.Args[0] + if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { + continue + } + i2 := x0_0.AuxInt + if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { + continue + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = 32 + v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56]) o0:(OR s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) [32]) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} 
p) mem) [32]) - for { - t := v.Type - _ = v.Args[1] - s2 := v.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) [32])) s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := o0.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - _ = x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] { - break - } - s2 := v.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - 
v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR o0:(OR s0:(SLDconst x0:(MOVHBRload (MOVDaddr [i2] {s} p) mem) [32]) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])) s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s0 := o0.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVHBRload || x0.Type != t { - break - } - mem := x0.Args[1] - x0_0 := x0.Args[0] - if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr { - break - } - i2 := x0_0.AuxInt - s := x0_0.Aux - p := x0_0.Args[0] - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s2 := v.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i0 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 32 - v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } + return false +} +func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem))))) // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) for { t 
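
When the pattern nests commutative ops, as in the (OR s2:(SLDconst ...) o0:(OR s1:... s0:...)) rules above, the generated code stacks one loop per OR: _i0 picks the ordering at the outer OR, _i1 at the inner one, `continue` advances just the loop whose check failed, `return true` fires the rewrite, and once every ordering is exhausted control falls out of both loops to the trailing `break` that abandons the rule. Below is a self-contained sketch of that two-level shape; the `expr` type and `matchNestedOr` are hypothetical stand-ins, not compiler code.

package main

import "fmt"

// expr is a hypothetical two-argument expression node, standing in for
// an SSA value purely for the sake of this sketch.
type expr struct {
	op   string
	aux  int64
	args [2]*expr
}

// matchNestedOr looks for (OR (SHL [24] a) (OR (SHL [16] b) c)), with
// both ORs treated as commutative. Each OR gets its own _i loop; a
// failed check continues that loop so the other argument order is
// still tried, mirroring the nested _i0/_i1 loops in the generated
// PPC64 rules.
func matchNestedOr(v *expr) (a, b, c *expr, ok bool) {
	if v.op != "OR" {
		return nil, nil, nil, false
	}
	for _i0 := 0; _i0 <= 1; _i0++ {
		s1 := v.args[_i0]
		if s1.op != "SHL" || s1.aux != 24 {
			continue
		}
		o0 := v.args[1^_i0]
		if o0.op != "OR" {
			continue
		}
		for _i1 := 0; _i1 <= 1; _i1++ {
			s0 := o0.args[_i1]
			if s0.op != "SHL" || s0.aux != 16 {
				continue
			}
			return s1.args[0], s0.args[0], o0.args[1^_i1], true
		}
	}
	return nil, nil, nil, false
}

func main() {
	a := &expr{op: "LOAD", aux: 0}
	b := &expr{op: "LOAD", aux: 1}
	c := &expr{op: "LOAD", aux: 2}
	s1 := &expr{op: "SHL", aux: 24, args: [2]*expr{a, nil}}
	s0 := &expr{op: "SHL", aux: 16, args: [2]*expr{b, nil}}

	// Swap operands at both levels: the pattern still matches.
	inner := &expr{op: "OR", args: [2]*expr{c, s0}}
	v := &expr{op: "OR", args: [2]*expr{inner, s1}}
	ga, gb, gc, ok := matchNestedOr(v)
	fmt.Println(ok, ga.aux, gb.aux, gc.aux) // true 0 1 2
}

In the generated file each loop nest lives inside the rule's own `for { ... }` block, which is why the fall-through ends in `break`; in this standalone sketch the final `return nil, nil, nil, false` plays that role.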
:= v.Type _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := 
x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x4.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - 
break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x5.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - 
if p != x5.Args[0] || mem != x5.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x5.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { - b := v.Block - config := b.Func.Config - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil 
&& clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR 
s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && 
mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - s6 := v.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 
48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem 
!= x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != 
s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s5 := o5.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 
&& x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - s := x5.Aux - mem := x5.Args[1] - p := x5.Args[0] - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - 
v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s4 := o4.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - s := x5.Aux - mem := x5.Args[1] - p := x5.Args[0] - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - x0 := o3.Args[0] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s3 := o3.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) 
s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5) - // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s3 := o3.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x4 := s3.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - s := x4.Aux - mem := x4.Args[1] - p := x4.Args[0] - x0 := o3.Args[1] - if x0.Op != OpPPC64MOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] { - break - } - s4 := o4.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { - break - } - x5 := s4.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s5 := o5.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { - break - } - x6 := s5.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - s6 := v.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { - break - } - x7 := s6.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) { - break - } - b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR o3:(OR x0:(MOVWZload {s} [i0] p mem) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && 
clobber(s4) && clobber(s5) && clobber (s6) && clobber(o3) && clobber(o4) && clobber(o5)
- // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem)
- for {
- t := v.Type
- _ = v.Args[1]
- o5 := v.Args[0]
- if o5.Op != OpPPC64OR || o5.Type != t {
- break
- }
- _ = o5.Args[1]
- o4 := o5.Args[0]
- if o4.Op != OpPPC64OR || o4.Type != t {
- break
- }
- _ = o4.Args[1]
- o3 := o4.Args[0]
- if o3.Op != OpPPC64OR || o3.Type != t {
- break
- }
- _ = o3.Args[1]
- x0 := o3.Args[0]
- if x0.Op != OpPPC64MOVWZload {
- break
- }
- i0 := x0.AuxInt
- s := x0.Aux
- mem := x0.Args[1]
- p := x0.Args[0]
- s3 := o3.Args[1]
- if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 {
- break
- }
- x4 := s3.Args[0]
- if x4.Op != OpPPC64MOVBZload {
- break
- }
- i4 := x4.AuxInt
- if x4.Aux != s {
- break
- }
- _ = x4.Args[1]
- if p != x4.Args[0] || mem != x4.Args[1] {
- break
- }
- s4 := o4.Args[1]
- if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 {
- break
- }
- x5 := s4.Args[0]
- if x5.Op != OpPPC64MOVBZload {
- break
- }
- i5 := x5.AuxInt
- if x5.Aux != s {
- break
- }
- _ = x5.Args[1]
- if p != x5.Args[0] || mem != x5.Args[1] {
- break
- }
- s5 := o5.Args[1]
- if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 {
- break
- }
- x6 := s5.Args[0]
- if x6.Op != OpPPC64MOVBZload {
- break
- }
- i6 := x6.AuxInt
- if x6.Aux != s {
- break
- }
- _ = x6.Args[1]
- if p != x6.Args[0] || mem != x6.Args[1] {
- break
- }
- s6 := v.Args[1]
- if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 {
- break
- }
- x7 := s6.Args[0]
- if x7.Op != OpPPC64MOVBZload {
- break
- }
- i7 := x7.AuxInt
- if x7.Aux != s {
- break
- }
- _ = x7.Args[1]
- if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) {
- break
- }
- b = mergePoint(b, x0, x4, x5, x6, x7)
- v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = i0
- v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
- return true
+ for _i0 := 0; _i0 <= 1; _i0++ {
+ s6 := v.Args[_i0]
+ if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 {
+ continue
+ }
+ x7 := s6.Args[0]
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := x7.AuxInt
+ s := x7.Aux
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v.Args[1^_i0]
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1++ {
+ s5 := o5.Args[_i1]
+ if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 {
+ continue
+ }
+ x6 := s5.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := x6.AuxInt
+ if x6.Aux != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5.Args[1^_i1]
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2++ {
+ s4 := o4.Args[_i2]
+ if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 {
+ continue
+ }
+ x5 := s4.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := x5.AuxInt
+ if x5.Aux != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4.Args[1^_i2]
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3++ {
+ s3 := o3.Args[_i3]
+ if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 {
+ continue
+ }
+ x4 := s3.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := x4.AuxInt
+ if x4.Aux != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ x0 := o3.Args[1^_i3]
+ if x0.Op != OpPPC64MOVWZload {
+ continue
+ }
+ i0 := x0.AuxInt
+ if x0.Aux != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o3) && clobber(o4) && clobber(o5)) {
+ continue
+ }
+ b = mergePoint(b, x0, x4, x5, x6, x7)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i0
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(mem)
+ return true
+ }
+ }
+ }
+ }
+ break
 }
 // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem)))))
 // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)
@@ -17082,1701 +13189,114 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool {
 for {
 t := v.Type
 _ = v.Args[1]
- s0 := v.Args[0]
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 {
- break
- }
- x0 := s0.Args[0]
- if x0.Op != OpPPC64MOVBZload {
- break
- }
- i0 := x0.AuxInt
- s := x0.Aux
- mem := x0.Args[1]
- p := x0.Args[0]
- o0 := v.Args[1]
- if o0.Op != OpPPC64OR || o0.Type != t {
- break
- }
- _ = o0.Args[1]
- s1 := o0.Args[0]
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 {
- break
- }
- x1 := s1.Args[0]
- if x1.Op != OpPPC64MOVBZload {
- break
- }
- i1 := x1.AuxInt
- if x1.Aux != s {
- break
- }
- _ = x1.Args[1]
- if p != x1.Args[0] || mem != x1.Args[1] {
- break
- }
- o1 := o0.Args[1]
- if o1.Op != OpPPC64OR || o1.Type != t {
- break
- }
- _ = o1.Args[1]
- s2 := o1.Args[0]
- if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 {
- break
- }
- x2 := s2.Args[0]
- if x2.Op != OpPPC64MOVBZload {
- break
- }
- i2 := x2.AuxInt
- if x2.Aux != s {
- break
- }
- _ = x2.Args[1]
- if p != x2.Args[0] || mem != x2.Args[1] {
- break
- }
- o2 := o1.Args[1]
- if o2.Op != OpPPC64OR || o2.Type != t {
- break
- }
- _ = o2.Args[1]
- s3 := o2.Args[0]
- if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 {
- break
- }
- x3 := s3.Args[0]
- if x3.Op != OpPPC64MOVBZload {
- break
- }
- i3 := x3.AuxInt
- if x3.Aux != s {
- break
- }
- _ = x3.Args[1]
- if p != x3.Args[0] || mem != x3.Args[1] {
- break
- }
- x4 := o2.Args[1]
- if x4.Op != OpPPC64MOVWBRload || x4.Type != t {
- break
- }
- _ = x4.Args[1]
- x4_0 := x4.Args[0]
- if x4_0.Op
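// A minimal standalone sketch of the operand-order loops added above, under toy
// assumptions: the node type and matchOrConst helper here are made up and are
// not part of the SSA package. The generated code tries Args[_iN] and
// Args[1^_iN] for each commutative OR and uses `continue` to fall through to
// the swapped order, so one loop body replaces every hand-expanded copy of the
// rule; the trailing `break` after the outermost loop keeps the old
// "no match, try the next rule" behavior.
package main

import "fmt"

// node stands in for *ssa.Value: an op name, an integer aux, and arguments.
type node struct {
	op   string
	aux  int64
	args []*node
}

// matchOrConst reports whether v is (OR (CONST c) x) in either operand order
// and returns x on success, mirroring the _i0 / 1^_i0 pattern above.
func matchOrConst(v *node, c int64) (*node, bool) {
	if v.op != "OR" || len(v.args) != 2 {
		return nil, false
	}
	for _i0 := 0; _i0 <= 1; _i0++ {
		k := v.args[_i0] // candidate constant operand for this order
		if k.op != "CONST" || k.aux != c {
			continue // this order failed; try the swapped one
		}
		return v.args[1^_i0], true // the other operand is the rest of the match
	}
	return nil, false // neither order matched
}

func main() {
	x := &node{op: "LOAD"}
	k := &node{op: "CONST", aux: 1}
	// The same loop body matches both operand orders.
	for _, v := range []*node{
		{op: "OR", args: []*node{k, x}},
		{op: "OR", args: []*node{x, k}},
	} {
		if rest, ok := matchOrConst(v, 1); ok {
			fmt.Println("matched; other operand:", rest.op)
		}
	}
}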
!= OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]))))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - o1 := o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses 
== 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem)) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - o1 := o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = 
mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - o1 := o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x2.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p 
mem) [56]) o0:(OR o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem))) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]))) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && 
x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR o1:(OR o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem)) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && 
clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR o1:(OR o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]))) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - 
break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - o0 := v.Args[1] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem)))) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - o1 := 
o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])))) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - o1 := o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != 
x2.Args[1] { - break - } - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem)) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]))) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - o1 := o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := 
x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]))) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - s1 := o0.Args[0] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - o1 := o0.Args[1] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { 
- break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem))) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == 
i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR o1:(OR s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]))) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - s2 := o1.Args[0] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - s := x2.Aux - mem := x2.Args[1] - p := x2.Args[0] - o2 := o1.Args[1] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { - break - } - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR o1:(OR o2:(OR s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload (MOVDaddr [i4] p) mem)) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - s3 := o2.Args[0] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - s := x3.Aux - mem := x3.Args[1] - p := x3.Args[0] - x4 := o2.Args[1] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - _ = x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - if p != x4_0.Args[0] || mem != x4.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = 
i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(OR o1:(OR o2:(OR x4:(MOVWBRload (MOVDaddr [i4] p) mem) s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])) s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])) s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])) s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])) - // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpPPC64OR || o0.Type != t { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpPPC64OR || o1.Type != t { - break - } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpPPC64OR || o2.Type != t { - break - } - _ = o2.Args[1] - x4 := o2.Args[0] - if x4.Op != OpPPC64MOVWBRload || x4.Type != t { - break - } - mem := x4.Args[1] - x4_0 := x4.Args[0] - if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { - break - } - i4 := x4_0.AuxInt - p := x4_0.Args[0] - s3 := o2.Args[1] - if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { - break - } - x3 := s3.Args[0] - if x3.Op != OpPPC64MOVBZload { - break - } - i3 := x3.AuxInt - s := x3.Aux - _ = x3.Args[1] - if p != x3.Args[0] || mem != x3.Args[1] { - break - } - s2 := o1.Args[1] - if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { - break - } - x2 := s2.Args[0] - if x2.Op != OpPPC64MOVBZload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] || mem != x2.Args[1] { - break - } - s1 := o0.Args[1] - if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { - break - } - x1 := s1.Args[0] - if x1.Op != OpPPC64MOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] { - break - } - s0 := v.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { - break - } - x0 := s0.Args[0] - if x0.Op != OpPPC64MOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { + continue + } + x0 := s0.Args[0] + if x0.Op != OpPPC64MOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := 
x0.Args[0] + o0 := v.Args[1^_i0] + if o0.Op != OpPPC64OR || o0.Type != t { + continue + } + _ = o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := o0.Args[_i1] + if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { + continue + } + x1 := s1.Args[0] + if x1.Op != OpPPC64MOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + o1 := o0.Args[1^_i1] + if o1.Op != OpPPC64OR || o1.Type != t { + continue + } + _ = o1.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s2 := o1.Args[_i2] + if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { + continue + } + x2 := s2.Args[0] + if x2.Op != OpPPC64MOVBZload { + continue + } + i2 := x2.AuxInt + if x2.Aux != s { + continue + } + _ = x2.Args[1] + if p != x2.Args[0] || mem != x2.Args[1] { + continue + } + o2 := o1.Args[1^_i2] + if o2.Op != OpPPC64OR || o2.Type != t { + continue + } + _ = o2.Args[1] + for _i3 := 0; _i3 <= 1; _i3++ { + s3 := o2.Args[_i3] + if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { + continue + } + x3 := s3.Args[0] + if x3.Op != OpPPC64MOVBZload { + continue + } + i3 := x3.AuxInt + if x3.Aux != s { + continue + } + _ = x3.Args[1] + if p != x3.Args[0] || mem != x3.Args[1] { + continue + } + x4 := o2.Args[1^_i3] + if x4.Op != OpPPC64MOVWBRload || x4.Type != t { + continue + } + _ = x4.Args[1] + x4_0 := x4.Args[0] + if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr { + continue + } + i4 := x4_0.AuxInt + if p != x4_0.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)) { + continue + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } + } + } + } + break } // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]))))) // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) @@ -18784,1710 +13304,114 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { for { t := v.Type _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != 
OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]))))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 
16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - 
break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - 
break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op 
!= OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && 
o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && 
clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - 
return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])))) x7:(MOVBZload [i7] {s} p mem)) - // cond: 
!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != 
nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - 
o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ 
= o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - s := x5.Aux - mem := x5.Args[1] - p := x5.Args[0] - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - s := x5.Aux - mem := x5.Args[1] - p := x5.Args[0] - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = 
o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - s := x4.Aux - mem := x4.Args[1] - p := x4.Args[0] - s0 := o3.Args[1] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - 
if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR o3:(OR s0:(SLWconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - mem := x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - s := x3_0.Aux - p := x3_0.Args[0] - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p 
!= x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + x7 := v.Args[_i0] + if x7.Op != OpPPC64MOVBZload { + continue + } + i7 := x7.AuxInt + s := x7.Aux + mem := x7.Args[1] + p := x7.Args[0] + o5 := v.Args[1^_i0] + if o5.Op != OpPPC64OR || o5.Type != t { + continue + } + _ = o5.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s6 := o5.Args[_i1] + if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { + continue + } + x6 := s6.Args[0] + if x6.Op != OpPPC64MOVBZload { + continue + } + i6 := x6.AuxInt + if x6.Aux != s { + continue + } + _ = x6.Args[1] + if p != x6.Args[0] || mem != x6.Args[1] { + continue + } + o4 := o5.Args[1^_i1] + if o4.Op != OpPPC64OR || o4.Type != t { + continue + } + _ = o4.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s5 := o4.Args[_i2] + if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { + continue + } + x5 := s5.Args[0] + if x5.Op != OpPPC64MOVBZload { + continue + } + i5 := x5.AuxInt + if x5.Aux != s { + continue + } + _ = x5.Args[1] + if p != x5.Args[0] || mem != x5.Args[1] { + continue + } + o3 := o4.Args[1^_i2] + if o3.Op != OpPPC64OR || o3.Type != t { + continue + } + _ = o3.Args[1] + for _i3 := 0; _i3 <= 1; _i3++ { + s4 := o3.Args[_i3] + if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { + continue + } + x4 := s4.Args[0] + if x4.Op != OpPPC64MOVBZload { + continue + } + i4 := x4.AuxInt + if x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] { + continue + } + s0 := o3.Args[1^_i3] + if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { + continue + } + x3 := s0.Args[0] + if x3.Op != OpPPC64MOVWBRload || x3.Type != t { + continue + } + _ = x3.Args[1] + x3_0 := x3.Args[0] + if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { + continue + } + i0 := x3_0.AuxInt + if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { + 
continue + } + b = mergePoint(b, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } + } + } + } + break } // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]))))) // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) @@ -20495,1704 +13419,114 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { for { t := v.Type _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // 
match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]))))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])))) - // cond: !config.BigEndian && i4 
== i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && 
clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if 
x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] 
- if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := 
s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR x7:(MOVBZload [i7] {s} p mem) o5:(OR o4:(OR o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]))) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - x7 := v.Args[0] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - s := x7.Aux - mem := x7.Args[1] - p := x7.Args[0] - o5 := v.Args[1] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if 
s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - 
x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - x7 := v.Args[1] - 
if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 
1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]))) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - s6 := o5.Args[0] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - s := x6.Aux - mem := x6.Args[1] - p := x6.Args[0] - o4 := o5.Args[1] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) 
&& clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - s := x5.Aux - mem := x5.Args[1] - p := x5.Args[0] - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - 
return true - } - // match: (OR o5:(OR o4:(OR s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]))) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - s5 := o4.Args[0] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - s := x5.Aux - mem := x5.Args[1] - p := x5.Args[0] - o3 := o4.Args[1] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR o3:(OR s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: 
!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s4 := o3.Args[0] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - s := x4.Aux - mem := x4.Args[1] - p := x4.Args[0] - s0 := o3.Args[1] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - _ = x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o5:(OR o4:(OR o3:(OR s0:(SLDconst x3:(MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])) s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])) s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])) x7:(MOVBZload [i7] {s} p mem)) - // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != 
nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6) - // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) - for { - t := v.Type - _ = v.Args[1] - o5 := v.Args[0] - if o5.Op != OpPPC64OR || o5.Type != t { - break - } - _ = o5.Args[1] - o4 := o5.Args[0] - if o4.Op != OpPPC64OR || o4.Type != t { - break - } - _ = o4.Args[1] - o3 := o4.Args[0] - if o3.Op != OpPPC64OR || o3.Type != t { - break - } - _ = o3.Args[1] - s0 := o3.Args[0] - if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { - break - } - x3 := s0.Args[0] - if x3.Op != OpPPC64MOVWBRload || x3.Type != t { - break - } - mem := x3.Args[1] - x3_0 := x3.Args[0] - if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { - break - } - i0 := x3_0.AuxInt - s := x3_0.Aux - p := x3_0.Args[0] - s4 := o3.Args[1] - if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { - break - } - x4 := s4.Args[0] - if x4.Op != OpPPC64MOVBZload { - break - } - i4 := x4.AuxInt - if x4.Aux != s { - break - } - _ = x4.Args[1] - if p != x4.Args[0] || mem != x4.Args[1] { - break - } - s5 := o4.Args[1] - if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { - break - } - x5 := s5.Args[0] - if x5.Op != OpPPC64MOVBZload { - break - } - i5 := x5.AuxInt - if x5.Aux != s { - break - } - _ = x5.Args[1] - if p != x5.Args[0] || mem != x5.Args[1] { - break - } - s6 := o5.Args[1] - if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { - break - } - x6 := s6.Args[0] - if x6.Op != OpPPC64MOVBZload { - break - } - i6 := x6.AuxInt - if x6.Aux != s { - break - } - _ = x6.Args[1] - if p != x6.Args[0] || mem != x6.Args[1] { - break - } - x7 := v.Args[1] - if x7.Op != OpPPC64MOVBZload { - break - } - i7 := x7.AuxInt - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] || mem != x7.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { - break - } - b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + x7 := v.Args[_i0] + if x7.Op != OpPPC64MOVBZload { + continue + } + i7 := x7.AuxInt + s := x7.Aux + mem := x7.Args[1] + p := x7.Args[0] + o5 := v.Args[1^_i0] + if o5.Op != OpPPC64OR || o5.Type != t { + continue + } + _ = o5.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s6 := o5.Args[_i1] + if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { + continue + } + x6 := s6.Args[0] + if x6.Op != OpPPC64MOVBZload { + continue + } + i6 := x6.AuxInt + if x6.Aux != s { + continue + } + _ = x6.Args[1] + if p != x6.Args[0] || mem != x6.Args[1] { + continue + } + o4 := o5.Args[1^_i1] + if o4.Op != OpPPC64OR || o4.Type != t { + continue + } + _ = o4.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s5 := o4.Args[_i2] + if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { + continue + } + x5 := s5.Args[0] + if x5.Op != OpPPC64MOVBZload { + continue + } + i5 := x5.AuxInt + if 
x5.Aux != s { + continue + } + _ = x5.Args[1] + if p != x5.Args[0] || mem != x5.Args[1] { + continue + } + o3 := o4.Args[1^_i2] + if o3.Op != OpPPC64OR || o3.Type != t { + continue + } + _ = o3.Args[1] + for _i3 := 0; _i3 <= 1; _i3++ { + s4 := o3.Args[_i3] + if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { + continue + } + x4 := s4.Args[0] + if x4.Op != OpPPC64MOVBZload { + continue + } + i4 := x4.AuxInt + if x4.Aux != s { + continue + } + _ = x4.Args[1] + if p != x4.Args[0] || mem != x4.Args[1] { + continue + } + s0 := o3.Args[1^_i3] + if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { + continue + } + x3 := s0.Args[0] + if x3.Op != OpPPC64MOVWBRload || x3.Type != t { + continue + } + _ = x3.Args[1] + x3_0 := x3.Args[0] + if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr { + continue + } + i0 := x3_0.AuxInt + if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)) { + continue + } + b = mergePoint(b, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) + return true + } + } + } + } + break } return false } @@ -22319,336 +13653,185 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { // result: (ROTLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLDconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRDconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpPPC64ROTLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (ROTLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR (SLWconst x [c]) (SRWconst x [d])) // cond: d == 32-c // result: (ROTLWconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLWconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpPPC64ROTLWconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if 
v_1.Op != OpPPC64SRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (ROTLWconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLWconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) // result: (ROTL x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLD { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64SLD { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRD { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] { + continue + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRD { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) (SLD x (ANDconst [63] y))) - // result: (ROTL x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRD { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64SUB || v_0_1.Type != typ.UInt { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64MOVDconst || v_0_1_0.AuxInt != 64 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpPPC64ANDconst || v_0_1_1.Type != typ.UInt || v_0_1_1.AuxInt != 63 { - break - } - y := v_0_1_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLD { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.Int64 || v_1_1.AuxInt != 63 || y != v_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTL) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SLW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := 
v.Args[_i0] + if v_0.Op != OpPPC64SLW { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 { + continue + } + y := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64SRW { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] { + continue + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 { - break - } - y := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) (SLW x (ANDconst [31] y))) - // result: (ROTLW x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64SRW { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64SUB || v_0_1.Type != typ.UInt { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64MOVDconst || v_0_1_0.AuxInt != 32 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpPPC64ANDconst || v_0_1_1.Type != typ.UInt || v_0_1_1.AuxInt != 31 { - break - } - y := v_0_1_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SLW { - break - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.Int32 || v_1_1.AuxInt != 31 || y != v_1_1.Args[0] { - break - } - v.reset(OpPPC64ROTLW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c ^ d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c ^ d - return true + break } - // match: (XOR (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c^d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c ^ d - return true - } - return false -} -func rewriteValuePPC64_OpPPC64XOR_10(v *Value) bool { // match: (XOR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (XORconst [c] x) for 
{ _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := v_1.AuxInt + if !(isU32Bit(c)) { + continue + } + v.reset(OpPPC64XORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpPPC64XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVDconst [c]) x) - // cond: isU32Bit(c) - // result: (XORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - c := v_0.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpPPC64XORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -23671,40 +14854,21 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1.Args[_i0] + v_1_1 := v_1.Args[1^_i0] + if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { + continue + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = 31 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (Rsh32Ux64 x (AND (MOVDconst [31]) y)) - // result: (SRW x (ANDconst [31] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64AND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 31 { - break - } - v.reset(OpPPC64SRW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = 31 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (Rsh32Ux64 x (ANDconst [31] y)) // result: (SRW x (ANDconst [31] y)) @@ -23775,65 +14939,27 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { break } _ = v_1_1.Args[1] - y := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1_1.Args[_i0] + v_1_1_1 := v_1_1.Args[1^_i0] + if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 { + continue + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 32 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 31 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } - // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND (MOVDconst [31]) y))) - // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 32 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - y := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op 
!= OpPPC64MOVDconst || v_1_1_0.AuxInt != 31 { - break - } - v.reset(OpPPC64SRW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 32 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 31 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuePPC64_OpRsh32Ux64_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) for { @@ -24089,40 +15215,21 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1.Args[_i0] + v_1_1 := v_1.Args[1^_i0] + if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { + continue + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRAW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = 31 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (Rsh32x64 x (AND (MOVDconst [31]) y)) - // result: (SRAW x (ANDconst [31] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64AND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 31 { - break - } - v.reset(OpPPC64SRAW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = 31 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (Rsh32x64 x (ANDconst [31] y)) // result: (SRAW x (ANDconst [31] y)) @@ -24193,65 +15300,27 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { break } _ = v_1_1.Args[1] - y := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1_1.Args[_i0] + v_1_1_1 := v_1_1.Args[1^_i0] + if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 { + continue + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRAW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 32 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 31 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } - // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND (MOVDconst [31]) y))) - // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 32 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - y := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 31 { - break - } - v.reset(OpPPC64SRAW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - 
v1.AuxInt = 32 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 31 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuePPC64_OpRsh32x64_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (Rsh32x64 x y) // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) for { @@ -24505,40 +15574,21 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1.Args[_i0] + v_1_1 := v_1.Args[1^_i0] + if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { + continue + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (Rsh64Ux64 x (AND (MOVDconst [63]) y)) - // result: (SRD x (ANDconst [63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64AND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 63 { - break - } - v.reset(OpPPC64SRD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (Rsh64Ux64 x (ANDconst [63] y)) // result: (SRD x (ANDconst [63] y)) @@ -24609,65 +15659,27 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { break } _ = v_1_1.Args[1] - y := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1_1.Args[_i0] + v_1_1_1 := v_1_1.Args[1^_i0] + if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 { + continue + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 64 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 63 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } - // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND (MOVDconst [63]) y))) - // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 64 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - y := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 63 { - break - } - v.reset(OpPPC64SRD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 64 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 63 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - return false -} -func 
rewriteValuePPC64_OpRsh64Ux64_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (Rsh64Ux64 x y) // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) for { @@ -24923,40 +15935,21 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1.Args[_i0] + v_1_1 := v_1.Args[1^_i0] + if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { + continue + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRAD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (Rsh64x64 x (AND (MOVDconst [63]) y)) - // result: (SRAD x (ANDconst [63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64AND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 63 { - break - } - v.reset(OpPPC64SRAD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (Rsh64x64 x (ANDconst [63] y)) // result: (SRAD x (ANDconst [63] y)) @@ -25027,65 +16020,27 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { break } _ = v_1_1.Args[1] - y := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + y := v_1_1.Args[_i0] + v_1_1_1 := v_1_1.Args[1^_i0] + if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 { + continue + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true } - v.reset(OpPPC64SRAD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 64 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 63 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + break } - // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND (MOVDconst [63]) y))) - // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 64 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - y := v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 63 { - break - } - v.reset(OpPPC64SRAD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = 64 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = 63 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuePPC64_OpRsh64x64_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (Rsh64x64 x y) // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) for { @@ 
-26664,17 +17619,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64EQ) + v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64EQ) - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 @@ -26688,17 +17647,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64OR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64EQ) + v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64EQ) - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (EQ (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 @@ -26712,17 +17675,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64XOR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64EQ) + v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64EQ) - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } case BlockPPC64GE: // match: (GE (FlagEQ) yes no) @@ -26805,17 +17772,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64GE) + v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64GE) - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 @@ -26829,17 +17800,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64OR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64GE) + v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64GE) - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GE (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 @@ -26853,17 +17828,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64XOR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if 
!(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64GE) + v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64GE) - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } case BlockPPC64GT: // match: (GT (FlagEQ) yes no) @@ -26947,17 +17926,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64GT) + v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64GT) - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 @@ -26971,17 +17954,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64OR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64GT) + v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64GT) - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (GT (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 @@ -26995,17 +17982,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64XOR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64GT) + v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64GT) - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } case BlockIf: // match: (If (Equal cc) yes no) @@ -27190,17 +18181,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64LE) + v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64LE) - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 @@ -27214,17 +18209,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64OR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64LE) + v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64LE) - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) 
- v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LE (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 @@ -27238,17 +18237,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64XOR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64LE) + v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64LE) - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } case BlockPPC64LT: // match: (LT (FlagEQ) yes no) @@ -27332,17 +18335,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64LT) + v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64LT) - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 @@ -27356,17 +18363,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64OR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64LT) + v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64LT) - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (LT (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 @@ -27380,17 +18391,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64XOR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64LT) + v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64LT) - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } case BlockPPC64NE: // match: (NE (CMPWconst [0] (Equal cc)) yes no) @@ -27673,17 +18688,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64AND { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64NE) + v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64NE) - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 @@ -27697,17 +18716,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64OR { break } - y := z.Args[1] - x := 
z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64NE) + v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64NE) - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } // match: (NE (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 @@ -27721,17 +18744,21 @@ func rewriteBlockPPC64(b *Block) bool { if z.Op != OpPPC64XOR { break } - y := z.Args[1] - x := z.Args[0] - if !(z.Uses == 1) { - break + _ = z.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := z.Args[_i0] + y := z.Args[1^_i0] + if !(z.Uses == 1) { + continue + } + b.Reset(BlockPPC64NE) + v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.AddControl(v0) + return true } - b.Reset(BlockPPC64NE) - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.AddControl(v0) - return true + break } } return false diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 8ccfaa54f9..2c23609d48 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -3195,39 +3195,24 @@ func rewriteValueRISCV64_OpRISCV64ADD_0(v *Value) bool { // match: (ADD (MOVDconst [off]) ptr) // cond: is32Bit(off) // result: (ADDI [off] ptr) - for { - ptr := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpRISCV64MOVDconst { - break - } - off := v_0.AuxInt - if !(is32Bit(off)) { - break - } - v.reset(OpRISCV64ADDI) - v.AuxInt = off - v.AddArg(ptr) - return true - } - // match: (ADD ptr (MOVDconst [off])) - // cond: is32Bit(off) - // result: (ADDI [off] ptr) for { _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpRISCV64MOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpRISCV64MOVDconst { + continue + } + off := v_0.AuxInt + ptr := v.Args[1^_i0] + if !(is32Bit(off)) { + continue + } + v.reset(OpRISCV64ADDI) + v.AuxInt = off + v.AddArg(ptr) + return true } - off := v_1.AuxInt - if !(is32Bit(off)) { - break - } - v.reset(OpRISCV64ADDI) - v.AuxInt = off - v.AddArg(ptr) - return true + break } return false } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index e7c8120ea0..b04e1439bf 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -499,7 +499,7 @@ func rewriteValueS390X(v *Value) bool { case OpS390XADDload: return rewriteValueS390X_OpS390XADDload_0(v) case OpS390XAND: - return rewriteValueS390X_OpS390XAND_0(v) || rewriteValueS390X_OpS390XAND_10(v) + return rewriteValueS390X_OpS390XAND_0(v) case OpS390XANDW: return rewriteValueS390X_OpS390XANDW_0(v) case OpS390XANDWconst: @@ -585,7 +585,7 @@ func rewriteValueS390X(v *Value) bool { case OpS390XMOVBstoreconst: return rewriteValueS390X_OpS390XMOVBstoreconst_0(v) case OpS390XMOVBstoreidx: - return rewriteValueS390X_OpS390XMOVBstoreidx_0(v) || rewriteValueS390X_OpS390XMOVBstoreidx_10(v) || rewriteValueS390X_OpS390XMOVBstoreidx_20(v) || rewriteValueS390X_OpS390XMOVBstoreidx_30(v) + return rewriteValueS390X_OpS390XMOVBstoreidx_0(v) case OpS390XMOVDaddridx: return rewriteValueS390X_OpS390XMOVDaddridx_0(v) case OpS390XMOVDload: @@ -601,7 +601,7 @@ func rewriteValueS390X(v 
*Value) bool { case OpS390XMOVHBRstore: return rewriteValueS390X_OpS390XMOVHBRstore_0(v) case OpS390XMOVHBRstoreidx: - return rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v) || rewriteValueS390X_OpS390XMOVHBRstoreidx_10(v) + return rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v) case OpS390XMOVHZload: return rewriteValueS390X_OpS390XMOVHZload_0(v) case OpS390XMOVHZloadidx: @@ -619,7 +619,7 @@ func rewriteValueS390X(v *Value) bool { case OpS390XMOVHstoreconst: return rewriteValueS390X_OpS390XMOVHstoreconst_0(v) case OpS390XMOVHstoreidx: - return rewriteValueS390X_OpS390XMOVHstoreidx_0(v) || rewriteValueS390X_OpS390XMOVHstoreidx_10(v) + return rewriteValueS390X_OpS390XMOVHstoreidx_0(v) case OpS390XMOVWBRstore: return rewriteValueS390X_OpS390XMOVWBRstore_0(v) case OpS390XMOVWBRstoreidx: @@ -641,7 +641,7 @@ func rewriteValueS390X(v *Value) bool { case OpS390XMOVWstoreconst: return rewriteValueS390X_OpS390XMOVWstoreconst_0(v) case OpS390XMOVWstoreidx: - return rewriteValueS390X_OpS390XMOVWstoreidx_0(v) || rewriteValueS390X_OpS390XMOVWstoreidx_10(v) + return rewriteValueS390X_OpS390XMOVWstoreidx_0(v) case OpS390XMULLD: return rewriteValueS390X_OpS390XMULLD_0(v) case OpS390XMULLDconst: @@ -663,9 +663,9 @@ func rewriteValueS390X(v *Value) bool { case OpS390XNOTW: return rewriteValueS390X_OpS390XNOTW_0(v) case OpS390XOR: - return rewriteValueS390X_OpS390XOR_0(v) || rewriteValueS390X_OpS390XOR_10(v) || rewriteValueS390X_OpS390XOR_20(v) || rewriteValueS390X_OpS390XOR_30(v) || rewriteValueS390X_OpS390XOR_40(v) || rewriteValueS390X_OpS390XOR_50(v) || rewriteValueS390X_OpS390XOR_60(v) || rewriteValueS390X_OpS390XOR_70(v) || rewriteValueS390X_OpS390XOR_80(v) || rewriteValueS390X_OpS390XOR_90(v) || rewriteValueS390X_OpS390XOR_100(v) || rewriteValueS390X_OpS390XOR_110(v) || rewriteValueS390X_OpS390XOR_120(v) || rewriteValueS390X_OpS390XOR_130(v) || rewriteValueS390X_OpS390XOR_140(v) || rewriteValueS390X_OpS390XOR_150(v) || rewriteValueS390X_OpS390XOR_160(v) + return rewriteValueS390X_OpS390XOR_0(v) || rewriteValueS390X_OpS390XOR_10(v) || rewriteValueS390X_OpS390XOR_20(v) case OpS390XORW: - return rewriteValueS390X_OpS390XORW_0(v) || rewriteValueS390X_OpS390XORW_10(v) || rewriteValueS390X_OpS390XORW_20(v) || rewriteValueS390X_OpS390XORW_30(v) || rewriteValueS390X_OpS390XORW_40(v) || rewriteValueS390X_OpS390XORW_50(v) || rewriteValueS390X_OpS390XORW_60(v) || rewriteValueS390X_OpS390XORW_70(v) || rewriteValueS390X_OpS390XORW_80(v) + return rewriteValueS390X_OpS390XORW_0(v) || rewriteValueS390X_OpS390XORW_10(v) case OpS390XORWconst: return rewriteValueS390X_OpS390XORWconst_0(v) case OpS390XORWload: @@ -6589,159 +6589,93 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { // result: (ADDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpS390XADDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpS390XADDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (MOVDconst [c]) x) - // cond: is32Bit(c) - // result: (ADDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpS390XADDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD (SLDconst x [c]) (SRDconst x 
[d])) // cond: d == 64-c // result: (RLLGconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLDconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XSRDconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpS390XRLLGconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADD (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (RLLGconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSRDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADD idx (MOVDaddr [c] {s} ptr)) // cond: ptr.Op != OpSB && idx.Op != OpSB // result: (MOVDaddridx [c] {s} ptr idx) for { _ = v.Args[1] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDaddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + idx := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDaddr { + continue + } + c := v_1.AuxInt + s := v_1.Aux + ptr := v_1.Args[0] + if !(ptr.Op != OpSB && idx.Op != OpSB) { + continue + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + v.Aux = s + v.AddArg(ptr) + v.AddArg(idx) + return true } - c := v_1.AuxInt - s := v_1.Aux - ptr := v_1.Args[0] - if !(ptr.Op != OpSB && idx.Op != OpSB) { - break - } - v.reset(OpS390XMOVDaddridx) - v.AuxInt = c - v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - return true - } - // match: (ADD (MOVDaddr [c] {s} ptr) idx) - // cond: ptr.Op != OpSB && idx.Op != OpSB - // result: (MOVDaddridx [c] {s} ptr idx) - for { - idx := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - c := v_0.AuxInt - s := v_0.Aux - ptr := v_0.Args[0] - if !(ptr.Op != OpSB && idx.Op != OpSB) { - break - } - v.reset(OpS390XMOVDaddridx) - v.AuxInt = c - v.Aux = s - v.AddArg(ptr) - v.AddArg(idx) - return true + break } // match: (ADD x (NEG y)) // result: (SUB x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XNEG { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XNEG { + continue + } + y := v_1.Args[0] + v.reset(OpS390XSUB) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpS390XSUB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADD (NEG y) x) - // result: (SUB x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XNEG { - break - } - y := v_0.Args[0] - v.reset(OpS390XSUB) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -6749,52 +6683,29 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVDload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := 
g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XADDload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ADDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -6804,37 +6715,22 @@ func rewriteValueS390X_OpS390XADDC_0(v *Value) bool { // result: (ADDCconst x [c]) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(is16Bit(c)) { + continue + } + v.reset(OpS390XADDCconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is16Bit(c)) { - break - } - v.reset(OpS390XADDCconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDC (MOVDconst [c]) x) - // cond: is16Bit(c) - // result: (ADDCconst x [c]) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(is16Bit(c)) { - break - } - v.reset(OpS390XADDCconst) - v.AuxInt = c - v.AddArg(x) - return true + break } return false } @@ -6913,107 +6809,64 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { // result: (ADDWconst [int64(int32(c))] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpS390XADDWconst) + v.AuxInt = int64(int32(c)) + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpS390XADDWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true - } - // match: (ADDW (MOVDconst [c]) x) - // result: (ADDWconst [int64(int32(c))] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XADDWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true + break } // match: (ADDW (SLWconst x [c]) (SRWconst x [d])) // cond: d == 32-c // result: (RLLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLWconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XSRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpS390XRLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRWconst { - 
break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDW (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (RLLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLWconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ADDW x (NEGW y)) // result: (SUBW x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XNEGW { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XNEGW { + continue + } + y := v_1.Args[0] + v.reset(OpS390XSUBW) + v.AddArg(x) + v.AddArg(y) + return true } - y := v_1.Args[0] - v.reset(OpS390XSUBW) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (ADDW (NEGW y) x) - // result: (SUBW x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XNEGW { - break - } - y := v_0.Args[0] - v.reset(OpS390XSUBW) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -7021,52 +6874,29 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XADDWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ADDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -7074,52 +6904,29 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWZload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + 
v.reset(OpS390XADDWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ADDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -7424,235 +7231,139 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // result: (ANDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c) && c < 0) { + continue + } + v.reset(OpS390XANDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c) && c < 0) { - break - } - v.reset(OpS390XANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [c]) x) - // cond: is32Bit(c) && c < 0 - // result: (ANDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c) && c < 0) { - break - } - v.reset(OpS390XANDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (AND x (MOVDconst [c])) // cond: is32Bit(c) && c >= 0 // result: (MOVWZreg (ANDWconst [int64(int32(c))] x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c) && c >= 0) { + continue + } + v.reset(OpS390XMOVWZreg) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int64(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(is32Bit(c) && c >= 0) { - break - } - v.reset(OpS390XMOVWZreg) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(int32(c)) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (AND (MOVDconst [c]) x) - // cond: is32Bit(c) && c >= 0 - // result: (MOVWZreg (ANDWconst [int64(int32(c))] x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c) && c >= 0) { - break - } - v.reset(OpS390XMOVWZreg) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = int64(int32(c)) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (AND x (MOVDconst [0xFF])) // result: (MOVBZreg x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFF { + continue + } + 
v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true } - v.reset(OpS390XMOVBZreg) - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [0xFF]) x) - // result: (MOVBZreg x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != 0xFF { - break - } - v.reset(OpS390XMOVBZreg) - v.AddArg(x) - return true + break } // match: (AND x (MOVDconst [0xFFFF])) // result: (MOVHZreg x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFFFF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFFFF { + continue + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true } - v.reset(OpS390XMOVHZreg) - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [0xFFFF]) x) - // result: (MOVHZreg x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != 0xFFFF { - break - } - v.reset(OpS390XMOVHZreg) - v.AddArg(x) - return true + break } // match: (AND x (MOVDconst [0xFFFFFFFF])) // result: (MOVWZreg x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFFFFFFFF { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFFFFFFFF { + continue + } + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true } - v.reset(OpS390XMOVWZreg) - v.AddArg(x) - return true + break } - // match: (AND (MOVDconst [0xFFFFFFFF]) x) - // result: (MOVWZreg x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != 0xFFFFFFFF { - break - } - v.reset(OpS390XMOVWZreg) - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XAND_10(v *Value) bool { - b := v.Block // match: (AND (MOVDconst [^(-1<<63)]) (LGDR x)) // result: (LGDR (LPDFR x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != ^(-1<<63) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != ^(-1<<63) { + continue + } + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XLGDR { + continue + } + t := v_1.Type + x := v_1.Args[0] + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true } - v_1 := v.Args[1] - if v_1.Op != OpS390XLGDR { - break - } - t := v_1.Type - x := v_1.Args[0] - v.reset(OpS390XLGDR) - v.Type = t - v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (AND (LGDR x) (MOVDconst [^(-1<<63)])) - // result: (LGDR (LPDFR x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XLGDR { - break - } - t := v_0.Type - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != ^(-1<<63) { - break - } - v.reset(OpS390XLGDR) - v.Type = t - v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (AND (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c&d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XMOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c & d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != 
OpS390XMOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c & d - return true - } - // match: (AND (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c&d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c & d - return true + break } // match: (AND x x) // result: x @@ -7672,52 +7383,29 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVDload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XANDload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (AND g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ANDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -7726,30 +7414,19 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { // result: (ANDWconst [int64(int32(c))] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpS390XANDWconst) + v.AuxInt = int64(int32(c)) + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpS390XANDWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true - } - // match: (ANDW (MOVDconst [c]) x) - // result: (ANDWconst [int64(int32(c))] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XANDWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true + break } // match: (ANDW x x) // result: x @@ -7769,52 +7446,29 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XANDWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr 
:= g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ANDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -7822,52 +7476,29 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWZload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XANDWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ANDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -9119,72 +8750,46 @@ func rewriteValueS390X_OpS390XCPSDR_0(v *Value) bool { func rewriteValueS390X_OpS390XFADD_0(v *Value) bool { // match: (FADD (FMUL y z) x) // result: (FMADD x y z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XFMUL { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpS390XFMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (FADD x (FMUL y z)) - // result: (FMADD x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XFMUL { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XFMUL { + continue + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v.Args[1^_i0] + v.reset(OpS390XFMADD) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpS390XFMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } func rewriteValueS390X_OpS390XFADDS_0(v *Value) bool { // match: (FADDS (FMULS y z) x) // result: (FMADDS x y 
z) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XFMULS { - break - } - z := v_0.Args[1] - y := v_0.Args[0] - v.reset(OpS390XFMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } - // match: (FADDS x (FMULS y z)) - // result: (FMADDS x y z) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XFMULS { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XFMULS { + continue + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v.Args[1^_i0] + v.reset(OpS390XFMADDS) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true } - z := v_1.Args[1] - y := v_1.Args[0] - v.reset(OpS390XFMADDS) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true + break } return false } @@ -9318,42 +8923,22 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XFMOVDloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XFMOVDloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (FMOVDload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (FMOVDloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XFMOVDloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -9501,46 +9086,24 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XFMOVDstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (FMOVDstore [off] {sym} (ADD idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (FMOVDstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XFMOVDstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } @@ -9709,42 +9272,22 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XFMOVSloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XFMOVSloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) 
- v.AddArg(mem) - return true - } - // match: (FMOVSload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (FMOVSloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XFMOVSloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -9892,46 +9435,24 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XFMOVSstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (FMOVSstore [off] {sym} (ADD idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (FMOVSstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XFMOVSstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } @@ -10107,25 +9628,6 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { } // match: (LDGR (AND (MOVDconst [^(-1<<63)]) x)) // result: (LPDFR (LDGR x)) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpS390XAND { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != ^(-1<<63) { - break - } - v.reset(OpS390XLPDFR) - v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (LDGR (AND x (MOVDconst [^(-1<<63)]))) - // result: (LPDFR (LDGR x)) for { t := v.Type v_0 := v.Args[0] @@ -10133,38 +9635,22 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpS390XMOVDconst || v_0_1.AuxInt != ^(-1<<63) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != ^(-1<<63) { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpS390XLPDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true } - v.reset(OpS390XLPDFR) - v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (LDGR (OR (MOVDconst [-1<<63]) x)) // result: (LNDFR (LDGR x)) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpS390XOR { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { - break - } - v.reset(OpS390XLNDFR) - v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (LDGR (OR x (MOVDconst [-1<<63]))) - // result: (LNDFR (LDGR x)) for { t := v.Type v_0 := v.Args[0] @@ -10172,16 +9658,19 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op 
!= OpS390XMOVDconst || v_0_1.AuxInt != -1<<63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpS390XLNDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true } - v.reset(OpS390XLNDFR) - v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (LDGR x:(ORload [off] {sym} (MOVDconst [-1<<63]) ptr mem)) // cond: x.Uses == 1 && clobber(x) @@ -10571,42 +10060,22 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVBZloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBZload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVBZloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -10618,48 +10087,26 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVBZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -10668,48 +10115,26 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVBZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { 
- break - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -11085,42 +10510,22 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVBloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -11132,48 +10537,26 @@ func rewriteValueS390X_OpS390XMOVBloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVBloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -11182,48 +10565,26 @@ func rewriteValueS390X_OpS390XMOVBloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + 
v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVBloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -11623,46 +10984,24 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} (ADD idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVBstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) @@ -11727,9 +11066,6 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w mem) @@ -11759,6 +11095,9 @@ func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { // match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w0 mem) @@ -12006,52 +11345,28 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) 
- v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: is20Bit(c+d) - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) // cond: is20Bit(c+d) @@ -12060,52 +11375,28 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVBstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: is20Bit(c+d) - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -12114,122 +11405,34 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) 
- for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -12238,145 +11441,39 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + continue + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - 
} - // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVBstoreidx_10(v *Value) bool { - // match: (MOVBstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -12385,122 +11482,34 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + 
v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -12509,146 +11518,40 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w0 := v.Args[2] + if w0.Op != OpS390XSRWconst { + continue + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt 
= i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVBstoreidx_20(v *Value) bool { // match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) @@ -12656,122 +11559,34 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_20(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - 
if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 8 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHBRstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -12780,142 +11595,39 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_20(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx 
:= v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != OpS390XSRDconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHBRstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-8 
|| w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) @@ -12924,125 +11636,34 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_20(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHBRstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVBstoreidx_30(v *Value) bool { - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || 
x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -13051,142 +11672,39 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_30(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVHBRstoreidx) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: 
(MOVBstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -13420,42 +11938,22 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVDloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVDloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -13467,48 +11965,26 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVDloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -13517,48 +11993,26 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { 
+ continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVDloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -13677,46 +12131,24 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off] {sym} (ADD idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVDstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x) @@ -13874,52 +12306,28 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: is20Bit(c+d) - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - 
v.AddArg(mem) - return true + break } // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) // cond: is20Bit(c+d) @@ -13928,52 +12336,28 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: is20Bit(c+d) - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } return false } @@ -14114,122 +12498,34 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} 
p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -14238,142 +12534,39 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - 
if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) @@ -14382,125 +12575,34 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ 
= v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHBRstoreidx_10(v *Value) bool { - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -14509,142 +12611,39 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 
2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -14756,42 +12755,22 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVHZloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if 
!(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -14803,48 +12782,26 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -14853,48 +12810,26 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -15262,42 +13197,22 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVHloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := 
v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -15309,48 +13224,26 @@ func rewriteValueS390X_OpS390XMOVHloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVHloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -15359,48 +13252,26 @@ func rewriteValueS390X_OpS390XMOVHloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVHloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -15852,46 +13723,24 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off] {sym} (ADD idx 
ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVHstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) @@ -15956,9 +13805,6 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueS390X_OpS390XMOVHstore_10(v *Value) bool { // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w mem) @@ -15988,6 +13834,9 @@ func rewriteValueS390X_OpS390XMOVHstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XMOVHstore_10(v *Value) bool { // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w0 mem) @@ -16114,52 +13963,28 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: is20Bit(c+d) - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) // cond: is20Bit(c+d) @@ -16168,52 +13993,28 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: is20Bit(c+d) - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -16222,122 +14023,34 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - 
// result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -16346,145 +14059,39 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + continue + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHstoreidx_10(v *Value) bool { - // match: (MOVHstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - 
mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -16493,122 +14100,34 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - 
w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != 16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -16617,142 +14136,39 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_10(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w0 := v.Args[2] + if w0.Op != OpS390XSRWconst { + continue + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - 
v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst || x_2.AuxInt != j+16 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -16830,122 +14246,34 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { + continue + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVDBRstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - 
break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -16954,142 +14282,39 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { + continue + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + w0 := x.Args[2] + if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVDBRstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} idx p w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - 
break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} idx p w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -17201,42 +14426,22 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVWZloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -17248,48 +14453,26 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != 
OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -17298,48 +14481,26 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -17707,42 +14868,22 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off] {sym} (ADD idx ptr) mem) - // cond: ptr.Op != OpSB - // result: (MOVWloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -17754,48 +14895,26 @@ func rewriteValueS390X_OpS390XMOVWloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != 
OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: is20Bit(c+d) - // result: (MOVWloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } // match: (MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: is20Bit(c+d) @@ -17804,48 +14923,26 @@ func rewriteValueS390X_OpS390XMOVWloadidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: is20Bit(c+d) - // result: (MOVWloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + break } return false } @@ -18328,46 +15425,24 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { if v_0.Op != OpS390XADD { break } - idx := v_0.Args[1] - ptr := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v_0.Args[_i0] + idx := v_0.Args[1^_i0] + val := v.Args[1] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} (ADD idx ptr) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[1] - idx := v_0.Args[0] - val := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - 
return true + break } // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) @@ -18432,9 +15507,6 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool { // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x) // result: (STM2 [i-4] {s} p w0 w1 mem) @@ -18465,6 +15537,9 @@ func rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool { // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) // result: (STM3 [i-8] {s} p w0 w1 w2 mem) @@ -18623,52 +15698,28 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XADDconst { + continue + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1^_i0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: is20Bit(c+d) - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) // cond: is20Bit(c+d) @@ -18677,52 +15728,28 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { c := v.AuxInt sym := v.Aux mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + ptr := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XADDconst { + continue + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + if !(is20Bit(c + d)) { + continue + } + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: is20Bit(c+d) - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - if !(is20Bit(c + d)) { - break - } - v.reset(OpS390XMOVWstoreidx) - 
v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true + break } // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -18731,122 +15758,34 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != 32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - 
v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true + break } // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) // cond: x.Uses == 1 && clobber(x) @@ -18855,145 +15794,39 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { i := v.AuxInt s := v.Aux _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + p := v.Args[_i0] + idx := v.Args[1^_i0] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + continue + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { + continue + } + mem := x.Args[3] + for _i1 := 0; _i1 <= 1; _i1++ { + if p != x.Args[_i1] || idx != x.Args[1^_i1] { + continue + } + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { + continue + } + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWstoreidx_10(v *Value) bool { - // match: (MOVWstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if p != x.Args[0] || idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [j+32] w) 
mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { - break - } - mem := x.Args[3] - if idx != x.Args[0] || p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst || x_2.AuxInt != j+32 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true + break } return false } @@ -19003,37 +15836,22 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { // result: (MULLDconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(is32Bit(c)) { + continue + } + v.reset(OpS390XMULLDconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpS390XMULLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULLD (MOVDconst [c]) x) - // cond: is32Bit(c) - // result: (MULLDconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpS390XMULLDconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -19041,52 +15859,29 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVDload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XMULLDload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (MULLDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -19275,30 +16070,19 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { // result: (MULLWconst [int64(int32(c))] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + 
for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpS390XMULLWconst) + v.AuxInt = int64(int32(c)) + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpS390XMULLWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true - } - // match: (MULLW (MOVDconst [c]) x) - // result: (MULLWconst [int64(int32(c))] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMULLWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true + break } // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -19306,52 +16090,29 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -19359,52 +16120,29 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWZload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := 
v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -19655,592 +16393,251 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // result: (ORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(isU32Bit(c)) { + continue + } + v.reset(OpS390XORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpS390XORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } - // match: (OR (MOVDconst [c]) x) - // cond: isU32Bit(c) - // result: (ORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpS390XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (SLDconst x [c]) (SRDconst x [d])) + // match: ( OR (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (RLLGconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLDconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XSRDconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpS390XRLLGconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (RLLGconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSRDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (OR (MOVDconst [-1<<63]) (LGDR x)) // result: (LGDR (LNDFR x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != -1<<63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != -1<<63 { + continue + } + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XLGDR { + continue + } + t := v_1.Type + x := v_1.Args[0] + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true } - v_1 := v.Args[1] - if v_1.Op != OpS390XLGDR { - break - } - t := v_1.Type - x := v_1.Args[0] - v.reset(OpS390XLGDR) - v.Type = t - v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (LGDR x) (MOVDconst [-1<<63])) - // result: (LGDR (LNDFR x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XLGDR { - break - } - t := v_0.Type - x := v_0.Args[0] - v_1 := 
v.Args[1] - if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != -1<<63 { - break - } - v.reset(OpS390XLGDR) - v.Type = t - v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR y))) // result: (LGDR (CPSDR y x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XSRDconst || v_0_0.AuxInt != 63 { + continue + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XLGDR { + continue + } + x := v_0_0_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XLGDR { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpS390XLPDFR { + continue + } + t := v_1_0.Type + y := v_1_0.Args[0] + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XSRDconst || v_0_0.AuxInt != 63 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpS390XLGDR { - break - } - x := v_0_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XLGDR { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XLPDFR { - break - } - t := v_1_0.Type - y := v_1_0.Args[0] - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (LGDR (LPDFR y)) (SLDconst [63] (SRDconst [63] (LGDR x)))) - // result: (LGDR (CPSDR y x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XLGDR { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XLPDFR { - break - } - t := v_0_0.Type - y := v_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLDconst || v_1.AuxInt != 63 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XSRDconst || v_1_0.AuxInt != 63 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpS390XLGDR { - break - } - x := v_1_0_0.Args[0] - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) // cond: c & -1<<63 == 0 // result: (LGDR (CPSDR (FMOVDconst [c]) x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XSRDconst || v_0_0.AuxInt != 63 { + continue + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XLGDR { + continue + } + x := v_0_0_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(c&-1<<63 == 0) { + continue + } + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) + v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) + v1.AuxInt = c + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XSRDconst || v_0_0.AuxInt != 63 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpS390XLGDR { - break - } - x := v_0_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - if !(c&-1<<63 == 0) { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) - v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - 
v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true + break } - // match: (OR (MOVDconst [c]) (SLDconst [63] (SRDconst [63] (LGDR x)))) - // cond: c & -1<<63 == 0 - // result: (LGDR (CPSDR (FMOVDconst [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XSLDconst || v_1.AuxInt != 63 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XSRDconst || v_1_0.AuxInt != 63 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpS390XLGDR { - break - } - x := v_1_0_0.Args[0] - if !(c&-1<<63 == 0) { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) - v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_10(v *Value) bool { - b := v.Block // match: (OR (AND (MOVDconst [-1<<63]) (LGDR x)) (LGDR (LPDFR y))) // result: (LGDR (CPSDR y x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XAND { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XAND { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_0_0 := v_0.Args[_i1] + if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { + continue + } + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpS390XLGDR { + continue + } + x := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XLGDR { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpS390XLPDFR { + continue + } + t := v_1_0.Type + y := v_1_0.Args[0] + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpS390XLGDR { - break - } - x := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XLGDR { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XLPDFR { - break - } - t := v_1_0.Type - y := v_1_0.Args[0] - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (AND (LGDR x) (MOVDconst [-1<<63])) (LGDR (LPDFR y))) - // result: (LGDR (CPSDR y x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XAND { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XLGDR { - break - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpS390XMOVDconst || v_0_1.AuxInt != -1<<63 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpS390XLGDR { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XLPDFR { - break - } - t := v_1_0.Type - y := v_1_0.Args[0] - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (LGDR (LPDFR y)) (AND (MOVDconst [-1<<63]) (LGDR x))) - // result: (LGDR (CPSDR y x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XLGDR { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XLPDFR { - break - } - t := v_0_0.Type - y := v_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst || v_1_0.AuxInt != -1<<63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XLGDR { - break - } - x := v_1_1.Args[0] - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, 
OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (LGDR (LPDFR y)) (AND (LGDR x) (MOVDconst [-1<<63]))) - // result: (LGDR (CPSDR y x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XLGDR { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XLPDFR { - break - } - t := v_0_0.Type - y := v_0_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XLGDR { - break - } - x := v_1_0.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst || v_1_1.AuxInt != -1<<63 { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (OR (AND (MOVDconst [-1<<63]) (LGDR x)) (MOVDconst [c])) // cond: c & -1<<63 == 0 // result: (LGDR (CPSDR (FMOVDconst [c]) x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XAND { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XAND { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_0_0 := v_0.Args[_i1] + if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { + continue + } + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpS390XLGDR { + continue + } + x := v_0_1.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(c&-1<<63 == 0) { + continue + } + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) + v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) + v1.AuxInt = c + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpS390XLGDR { - break - } - x := v_0_1.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - if !(c&-1<<63 == 0) { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) - v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (AND (LGDR x) (MOVDconst [-1<<63])) (MOVDconst [c])) - // cond: c & -1<<63 == 0 - // result: (LGDR (CPSDR (FMOVDconst [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XAND { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XLGDR { - break - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpS390XMOVDconst || v_0_1.AuxInt != -1<<63 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - if !(c&-1<<63 == 0) { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) - v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (MOVDconst [c]) (AND (MOVDconst [-1<<63]) (LGDR x))) - // cond: c & -1<<63 == 0 - // result: (LGDR (CPSDR (FMOVDconst [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst || v_1_0.AuxInt != -1<<63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XLGDR { - break - } - x := v_1_1.Args[0] - if !(c&-1<<63 == 0) { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, 
x.Type) - v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (OR (MOVDconst [c]) (AND (LGDR x) (MOVDconst [-1<<63]))) - // cond: c & -1<<63 == 0 - // result: (LGDR (CPSDR (FMOVDconst [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XLGDR { - break - } - x := v_1_0.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst || v_1_1.AuxInt != -1<<63 || !(c&-1<<63 == 0) { - break - } - v.reset(OpS390XLGDR) - v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) - v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (OR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c|d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XMOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c | d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c | d - return true + break } - // match: (OR (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c|d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c | d - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types // match: (OR x x) // result: x for { @@ -20259,7646 +16656,1187 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVDload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XORload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ORload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } + return false +} +func rewriteValueS390X_OpS390XOR_10(v *Value) 
bool { + b := v.Block + typ := &b.Func.Config.Types // match: (OR x1:(MOVBZload [i1] {s} p mem) sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem))) // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)) x1:(MOVBZload [i1] {s} p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR x1:(MOVHZload [i1] {s} p mem) sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem))) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVHZload { + continue + } + i1 := 
x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVHZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)) x1:(MOVHZload [i1] {s} p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR x1:(MOVWZload [i1] {s} p mem) sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem))) // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVWZload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVWZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := 
b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)) x1:(MOVWZload [i1] {s} p mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVDload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := or.Args[_i1] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, 
OpS390XSLDconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) y) s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - _ = 
v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)) y)) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVHZload { + continue + } + i0 := x0.AuxInt + s := 
x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := or.Args[_i1] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVHZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - 
v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)) y) s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - 
return true + break } // match: (OR x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_40(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) 
x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (OR x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLDconst 
[16] x0:(MOVHZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVHZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVHZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: 
@mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 
&& x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (OR x1:(MOVWZloadidx [i1] {s} p idx mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses 
== 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVWZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVWZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} idx p mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} p idx mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op 
!= OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} idx p mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem)) x1:(MOVWZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem)) x1:(MOVWZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != 
OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem)) x1:(MOVWZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_60(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem)) x1:(MOVWZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: 
@mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - 
if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op 
!= OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != 
OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_70(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if 
x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := 
x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := 
s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s1 := or.Args[_i2] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(idx) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y)) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVHZloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s1 := or.Args[_i2] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVHZloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(idx) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - 
v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } return false } -func 
rewriteValueS390X_OpS390XOR_80(v *Value) bool { +func rewriteValueS390X_OpS390XOR_20(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - 
v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] 
x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx 
mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses 
== 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_90(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 
1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } // match: (OR x0:(MOVBZload [i0] {s} p mem) sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem))) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)) x0:(MOVBZload [i0] {s} p 
mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) for { _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r0 := v.Args[_i0] + if r0.Op != OpS390XMOVHZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVHBRload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + continue + } + r1 := sh.Args[0] + if r1.Op != OpS390XMOVHZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVHBRload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - 
v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem)))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) for { _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r0 := v.Args[_i0] + if r0.Op != OpS390XMOVWZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVWBRload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + continue + } + r1 := sh.Args[0] + if r1.Op != OpS390XMOVWZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVWBRload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))) r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVDBRload, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) y)) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := or.Args[_i1] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j1 
:= s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_100(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) y) s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - _ = 
v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) y)) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload 
[i0] {s} p mem))) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + r1 := s1.Args[0] + if r1.Op != OpS390XMOVHZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVHBRload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := or.Args[_i1] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + r0 := s0.Args[0] + if r0.Op != OpS390XMOVHZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVHBRload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg 
(MOVWBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg 
x0:(MOVHBRload [i0] {s} p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (OR x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != 
OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_110(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 
&& mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - 
v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) for { _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r0 := v.Args[_i0] + if r0.Op != OpS390XMOVHZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVHBRloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { + continue + } + r1 := sh.Args[0] + if r1.Op != OpS390XMOVHZreg { + 
continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVHBRloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - r0 
:= v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_120(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) 
&& clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r0 := v.Args[_i0] + if r0.Op != OpS390XMOVWZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVWBRloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { + continue + } + r1 := sh.Args[0] + if r1.Op != OpS390XMOVWZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVWBRloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - x0 := r0.Args[0] - if x0.Op 
!= OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - 
break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := 
v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_130(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 
== i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses 
== 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = 
x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - 
} - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_140(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := 
s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - 
if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: 
@mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses 
== 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s0 := or.Args[_i2] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(idx) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y)) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && 
clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, 
typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_150(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break 
- } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx 
[i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - 
v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 
0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - 
break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y) 
s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR_160(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && 
s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != 
OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpS390XSLDconst { + continue + } + j1 := s1.AuxInt + r1 := s1.Args[0] + if r1.Op != OpS390XMOVHZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVHBRloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpS390XOR { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s0 := or.Args[_i2] + if s0.Op != OpS390XSLDconst { + continue + } + j0 := s0.AuxInt + r0 := s0.Args[0] + if r0.Op != OpS390XMOVHZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVHBRloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(idx) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } return false } @@ -27909,78 +17847,46 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // result: (ORWconst [int64(int32(c))] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpS390XORWconst) + v.AuxInt = int64(int32(c)) + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpS390XORWconst) - v.AuxInt = 
int64(int32(c)) - v.AddArg(x) - return true + break } - // match: (ORW (MOVDconst [c]) x) - // result: (ORWconst [int64(int32(c))] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XORWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true - } - // match: (ORW (SLWconst x [c]) (SRWconst x [d])) + // match: ( ORW (SLWconst x [c]) (SRWconst x [d])) // cond: d == 32-c // result: (RLLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLWconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XSRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpS390XRLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (RLLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLWconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (ORW x x) // result: x @@ -28000,52 +17906,29 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -28053,4180 +17936,690 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if 
g.Op != OpS390XMOVWZload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (ORW x1:(MOVBZload [i1] {s} p mem) sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem))) // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)) x1:(MOVBZload [i1] {s} p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) 
(MOVHZload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORW x1:(MOVHZload [i1] {s} p mem) sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem))) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVHZload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVHZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)) x1:(MOVHZload [i1] {s} p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpS390XSLWconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + or := v.Args[1^_i0] + if or.Op != OpS390XORW { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s1 := or.Args[_i1] + if s1.Op != OpS390XSLWconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // 
result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) y) s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 
:= s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORW x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != 
OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // 
result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // 
result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORW x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x1 := v.Args[_i0] + if x1.Op != OpS390XMOVHZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + continue + } + x0 := sh.Args[0] + if x0.Op != OpS390XMOVHZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] 
- p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != 
OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } return false } -func rewriteValueS390X_OpS390XORW_30(v *Value) bool { +func rewriteValueS390X_OpS390XORW_10(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: 
@mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) for { _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != 
OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != 
x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 
&& j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_40(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != 
x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses 
== 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := 
b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s0 := v.Args[_i0] + if s0.Op != OpS390XSLWconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpS390XORW { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s1 := or.Args[_i2] + if s1.Op != OpS390XSLWconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1.AuxInt = j1 + v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16) + v2.AuxInt = i0 + v2.Aux = s + v2.AddArg(p) + v2.AddArg(idx) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } // match: (ORW x0:(MOVBZload [i0] {s} p mem) sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem))) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) + return true } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := 
b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)) x0:(MOVBZload [i0] {s} p mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) for { _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r0 := v.Args[_i0] + if r0.Op != OpS390XMOVHZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVHBRload { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[1] + p := x0.Args[0] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + continue + } + r1 := sh.Args[0] + if r1.Op != OpS390XMOVHZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVHBRload { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true + break } // match: (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) y)) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpS390XSLWconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if x1.Op != OpS390XMOVBZload { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[1] + p := x1.Args[0] + or := v.Args[1^_i0] + if or.Op != OpS390XORW { + continue + } + _ = or.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + s0 := or.Args[_i1] + if s0.Op != OpS390XSLWconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZload { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[1] + if p != x0.Args[0] || mem != x0.Args[1] { + continue + } + y := or.Args[1^_i1] + if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(mem) + v2.AddArg(v3) + 
v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[1] - p := x1.Args[0] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) y) s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op 
!= OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[1] - p := x0.Args[0] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + break } // match: (ORW x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) for { _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x0 := v.Args[_i0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + s := 
x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { + continue + } + x1 := sh.Args[0] + if x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + } } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - 
if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_60(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 
&& sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v1.AuxInt = i0 - v1.Aux = s - 
v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true + break } // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) for { _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break + for _i0 := 0; _i0 <= 1; _i0++ { + r0 := v.Args[_i0] + if r0.Op != OpS390XMOVHZreg { + continue + } + x0 := r0.Args[0] + if x0.Op != OpS390XMOVHBRloadidx { + continue + } + i0 := x0.AuxInt + s := x0.Aux + mem := x0.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x0.Args[_i1] + idx := x0.Args[1^_i1] + sh := v.Args[1^_i0] + if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { + continue + } + r1 := sh.Args[0] + if r1.Op != OpS390XMOVHZreg { + continue + } + x1 := r1.Args[0] + if x1.Op != OpS390XMOVHBRloadidx { + continue + } + i1 := x1.AuxInt + if x1.Aux != s { + continue + } + _ = x1.Args[2] + for _i2 := 0; _i2 <= 1; _i2++ { + if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + } } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if 
x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx 
mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_70(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := 
x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - _ = v.Args[1] - sh := v.Args[0] - if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true + break } // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) for { _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break 
- } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || 
!(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - p := x1.Args[0] - idx := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != 
OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XORW_80(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - mem := x1.Args[2] - idx := x1.Args[0] - p := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - _ = x0.Args[2] - if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := 
s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - 
break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: 
@mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[1] - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - p := x0.Args[0] - idx := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - _ = v.Args[1] - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - _ = or.Args[1] - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - mem := x0.Args[2] - idx := x0.Args[0] - p := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[2] - if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1, y) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true + for _i0 := 0; _i0 <= 1; _i0++ { + s1 := v.Args[_i0] + if s1.Op != OpS390XSLWconst { + continue + } + j1 := s1.AuxInt + x1 := s1.Args[0] + if 
x1.Op != OpS390XMOVBZloadidx { + continue + } + i1 := x1.AuxInt + s := x1.Aux + mem := x1.Args[2] + for _i1 := 0; _i1 <= 1; _i1++ { + p := x1.Args[_i1] + idx := x1.Args[1^_i1] + or := v.Args[1^_i0] + if or.Op != OpS390XORW { + continue + } + _ = or.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + s0 := or.Args[_i2] + if s0.Op != OpS390XSLWconst { + continue + } + j0 := s0.AuxInt + x0 := s0.Args[0] + if x0.Op != OpS390XMOVBZloadidx { + continue + } + i0 := x0.AuxInt + if x0.Aux != s { + continue + } + _ = x0.Args[2] + for _i3 := 0; _i3 <= 1; _i3++ { + if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + continue + } + y := or.Args[1^_i2] + if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + continue + } + b = mergePoint(b, x0, x1, y) + v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1.AuxInt = j0 + v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) + v3.AuxInt = i0 + v3.Aux = s + v3.AddArg(p) + v3.AddArg(idx) + v3.AddArg(mem) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v0.AddArg(y) + return true + } + } + } + } + break } return false } @@ -32502,29 +18895,6 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { } // match: (SLD x (AND (MOVDconst [c]) y)) // result: (SLD x (ANDWconst [c&63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst { - break - } - c := v_1_0.AuxInt - v.reset(OpS390XSLD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SLD x (AND y (MOVDconst [c]))) - // result: (SLD x (ANDWconst [c&63] y)) for { _ = v.Args[1] x := v.Args[0] @@ -32533,19 +18903,22 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := v_1_0.AuxInt + y := v_1.Args[1^_i0] + v.reset(OpS390XSLD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = c & 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_1_1.AuxInt - v.reset(OpS390XSLD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SLD x (ANDWconst [c] y)) // cond: c&63 == 63 @@ -32679,29 +19052,6 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { } // match: (SLW x (AND (MOVDconst [c]) y)) // result: (SLW x (ANDWconst [c&63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst { - break - } - c := v_1_0.AuxInt - v.reset(OpS390XSLW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SLW x (AND y (MOVDconst [c]))) - // result: (SLW x (ANDWconst [c&63] y)) for { _ = v.Args[1] x := v.Args[0] @@ -32710,19 +19060,22 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool 
{ break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := v_1_0.AuxInt + y := v_1.Args[1^_i0] + v.reset(OpS390XSLW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = c & 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_1_1.AuxInt - v.reset(OpS390XSLW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SLW x (ANDWconst [c] y)) // cond: c&63 == 63 @@ -32856,29 +19209,6 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { } // match: (SRAD x (AND (MOVDconst [c]) y)) // result: (SRAD x (ANDWconst [c&63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst { - break - } - c := v_1_0.AuxInt - v.reset(OpS390XSRAD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SRAD x (AND y (MOVDconst [c]))) - // result: (SRAD x (ANDWconst [c&63] y)) for { _ = v.Args[1] x := v.Args[0] @@ -32887,19 +19217,22 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := v_1_0.AuxInt + y := v_1.Args[1^_i0] + v.reset(OpS390XSRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = c & 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_1_1.AuxInt - v.reset(OpS390XSRAD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SRAD x (ANDWconst [c] y)) // cond: c&63 == 63 @@ -33049,29 +19382,6 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { } // match: (SRAW x (AND (MOVDconst [c]) y)) // result: (SRAW x (ANDWconst [c&63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst { - break - } - c := v_1_0.AuxInt - v.reset(OpS390XSRAW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SRAW x (AND y (MOVDconst [c]))) - // result: (SRAW x (ANDWconst [c&63] y)) for { _ = v.Args[1] x := v.Args[0] @@ -33080,19 +19390,22 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := v_1_0.AuxInt + y := v_1.Args[1^_i0] + v.reset(OpS390XSRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = c & 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_1_1.AuxInt - v.reset(OpS390XSRAW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SRAW x (ANDWconst [c] y)) // cond: c&63 == 63 @@ -33242,29 +19555,6 @@ func rewriteValueS390X_OpS390XSRD_0(v 
*Value) bool { } // match: (SRD x (AND (MOVDconst [c]) y)) // result: (SRD x (ANDWconst [c&63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst { - break - } - c := v_1_0.AuxInt - v.reset(OpS390XSRD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SRD x (AND y (MOVDconst [c]))) - // result: (SRD x (ANDWconst [c&63] y)) for { _ = v.Args[1] x := v.Args[0] @@ -33273,19 +19563,22 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := v_1_0.AuxInt + y := v_1.Args[1^_i0] + v.reset(OpS390XSRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = c & 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_1_1.AuxInt - v.reset(OpS390XSRD) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SRD x (ANDWconst [c] y)) // cond: c&63 == 63 @@ -33446,29 +19739,6 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { } // match: (SRW x (AND (MOVDconst [c]) y)) // result: (SRW x (ANDWconst [c&63] y)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XAND { - break - } - y := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpS390XMOVDconst { - break - } - c := v_1_0.AuxInt - v.reset(OpS390XSRW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (SRW x (AND y (MOVDconst [c]))) - // result: (SRW x (ANDWconst [c&63] y)) for { _ = v.Args[1] x := v.Args[0] @@ -33477,19 +19747,22 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { break } _ = v_1.Args[1] - y := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_1_0 := v_1.Args[_i0] + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := v_1_0.AuxInt + y := v_1.Args[1^_i0] + v.reset(OpS390XSRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = c & 63 + v0.AddArg(y) + v.AddArg(v0) + return true } - c := v_1_1.AuxInt - v.reset(OpS390XSRW) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) - v0.AuxInt = c & 63 - v0.AddArg(y) - v.AddArg(v0) - return true + break } // match: (SRW x (ANDWconst [c] y)) // cond: c&63 == 63 @@ -34241,121 +20514,70 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // result: (XORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + if !(isU32Bit(c)) { + continue + } + v.reset(OpS390XXORconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_1.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpS390XXORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVDconst [c]) x) - // cond: isU32Bit(c) - // result: (XORconst [c] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - if !(isU32Bit(c)) { - break - } 
- v.reset(OpS390XXORconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (RLLGconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLDconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XSRDconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 64-c) { + continue + } + v.reset(OpS390XRLLGconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (RLLGconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSRDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XMOVDconst { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + d := v_1.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c ^ d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c ^ d - return true - } - // match: (XOR (MOVDconst [d]) (MOVDconst [c])) - // result: (MOVDconst [c^d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c ^ d - return true + break } // match: (XOR x x) // result: (MOVDconst [0]) @@ -34374,52 +20596,29 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVDload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XXORload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XOR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (XORload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := 
g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } @@ -34428,78 +20627,46 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { // result: (XORWconst [int64(int32(c))] x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := v_1.AuxInt + v.reset(OpS390XXORWconst) + v.AuxInt = int64(int32(c)) + v.AddArg(x) + return true } - c := v_1.AuxInt - v.reset(OpS390XXORWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true - } - // match: (XORW (MOVDconst [c]) x) - // result: (XORWconst [int64(int32(c))] x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XXORWconst) - v.AuxInt = int64(int32(c)) - v.AddArg(x) - return true + break } // match: (XORW (SLWconst x [c]) (SRWconst x [d])) // cond: d == 32-c // result: (RLLconst [c] x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSLWconst { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpS390XSLWconst { + continue + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpS390XSRWconst { + continue + } + d := v_1.AuxInt + if x != v_1.Args[0] || !(d == 32-c) { + continue + } + v.reset(OpS390XRLLconst) + v.AuxInt = c + v.AddArg(x) + return true } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORW (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (RLLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpS390XSRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLWconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] || !(d == 32-c) { - break - } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true + break } // match: (XORW x x) // result: (MOVDconst [0]) @@ -34518,52 +20685,29 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XXORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (XORWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != 
OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) @@ -34571,52 +20715,29 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + g := v.Args[1^_i0] + if g.Op != OpS390XMOVWZload { + continue + } + off := g.AuxInt + sym := g.Aux + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XXORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) - // result: (XORWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[1] - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - mem := g.Args[1] - ptr := g.Args[0] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true + break } return false } diff --git a/src/cmd/compile/internal/ssa/rewrite_rule_idea.txt b/src/cmd/compile/internal/ssa/rewrite_rule_idea.txt new file mode 100644 index 0000000000..003e4c02cd --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewrite_rule_idea.txt @@ -0,0 +1,17 @@ +idea: pack info about value substructure into an int64 +all values should be encoded as 1< (Mul16 x y) (Mul16 x z)) // result: (Mul16 x (Add16 y z)) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMul16 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + y := v_0.Args[1^_i1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + if x != v_1.Args[_i2] { + continue + } + z := v_1.Args[1^_i2] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } - // match: (Add16 (Mul16 y x) (Mul16 x z)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
OpMul16 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Mul16 x y) (Mul16 z x)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Mul16 y x) (Mul16 z x)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Mul16 x z) (Mul16 x y)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - z := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Mul16 z x) (Mul16 x y)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - z := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Mul16 x z) (Mul16 y x)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - z := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Mul16 z x) (Mul16 y x)) - // result: (Mul16 x (Add16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - z := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpAdd16_10(v *Value) bool { - b := v.Block // match: (Add16 (Const16 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add16 x (Const16 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if 
v_0.Op != OpConst16 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Add16 (Const16 [1]) (Com16 x)) // result: (Neg16 x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != 1 { + continue + } + v_1 := v.Args[1^_i0] + if v_1.Op != OpCom16 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg16) + v.AddArg(x) + return true } - v_1 := v.Args[1] - if v_1.Op != OpCom16 { - break - } - x := v_1.Args[0] - v.reset(OpNeg16) - v.AddArg(x) - return true - } - // match: (Add16 (Com16 x) (Const16 [1])) - // result: (Neg16 x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpCom16 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 1 { - break - } - v.reset(OpNeg16) - v.AddArg(x) - return true + break } // match: (Add16 (Add16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Add16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add16 (Add16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Add16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add16 x (Add16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Add16 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAdd16 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpAdd16) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add16 x (Add16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Add16 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true 
+ break } // match: (Add16 (Sub16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Sub16 x z)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 x (Sub16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Sub16 x z)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpSub16 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst16 { + continue + } + t := i.Type + x := v.Args[1^_i0] + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpAdd16) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg(x) + v0.AddArg(z) + v.AddArg(v0) + return true } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true + break } - return false -} -func rewriteValuegeneric_OpAdd16_20(v *Value) bool { - b := v.Block // match: (Add16 (Sub16 z i:(Const16 )) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Sub16 (Add16 x z) i) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add16 x (Sub16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Sub16 (Add16 x z) i) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpSub16 { + continue + } + _ = v_0.Args[1] + z := v_0.Args[0] + i := v_0.Args[1] + if i.Op != OpConst16 { + continue + } + t := i.Type + x := v.Args[1^_i0] + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(x) + v0.AddArg(z) + v.AddArg(v0) + v.AddArg(i) + return true } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true + break } // match: (Add16 (Const16 [c]) (Add16 (Const16 [d]) x)) // result: (Add16 (Const16 [int64(int16(c+d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAdd16) + v0 := 
b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c + d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Const16 [c]) (Add16 x (Const16 [d]))) - // result: (Add16 (Const16 [int64(int16(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Add16 (Const16 [d]) x) (Const16 [c])) - // result: (Add16 (Const16 [int64(int16(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Add16 x (Const16 [d])) (Const16 [c])) - // result: (Add16 (Const16 [int64(int16(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) // result: (Sub16 (Const16 [int64(int16(c+d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub16 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c + d)) + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Sub16 (Const16 [d]) x) (Const16 [c])) - // result: (Sub16 (Const16 [int64(int16(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - 
v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add16 (Const16 [c]) (Sub16 x (Const16 [d]))) // result: (Add16 (Const16 [int64(int16(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub16 { + continue + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 || v_1_1.Type != t { + continue + } + d := v_1_1.AuxInt + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Sub16 x (Const16 [d])) (Const16 [c])) - // result: (Add16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -1244,762 +765,283 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // result: (Const32 [int64(int32(c+d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(c + d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c + d)) - return true - } - // match: (Add32 (Const32 [d]) (Const32 [c])) - // result: (Const32 [int64(int32(c+d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c + d)) - return true + break } // match: (Add32 (Mul32 x y) (Mul32 x z)) // result: (Mul32 x (Add32 y z)) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMul32 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + y := v_0.Args[1^_i1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + if x != v_1.Args[_i2] { + continue + } + z := v_1.Args[1^_i2] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - 
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
+		break
 	}
-	// match: (Add32 (Mul32 y x) (Mul32 x z))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		x := v_0.Args[1]
-		y := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		z := v_1.Args[1]
-		if x != v_1.Args[0] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Mul32 x y) (Mul32 z x))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		y := v_0.Args[1]
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		_ = v_1.Args[1]
-		z := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Mul32 y x) (Mul32 z x))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		x := v_0.Args[1]
-		y := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		_ = v_1.Args[1]
-		z := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Mul32 x z) (Mul32 x y))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		z := v_0.Args[1]
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		y := v_1.Args[1]
-		if x != v_1.Args[0] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Mul32 z x) (Mul32 x y))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		x := v_0.Args[1]
-		z := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		y := v_1.Args[1]
-		if x != v_1.Args[0] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Mul32 x z) (Mul32 y x))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		z := v_0.Args[1]
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		_ = v_1.Args[1]
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Mul32 z x) (Mul32 y x))
-	// result: (Mul32 x (Add32 y z))
-	for {
-		t := v.Type
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpMul32 {
-			break
-		}
-		x := v_0.Args[1]
-		z := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
-		}
-		_ = v_1.Args[1]
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpMul32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd32_10(v *Value) bool {
-	b := v.Block
 	// match: (Add32 (Const32 [0]) x)
 	// result: x
-	for {
-		x := v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 || v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 x (Const32 [0]))
-	// result: x
 	for {
 		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 || v_1.AuxInt != 0 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpConst32 || v_0.AuxInt != 0 {
+				continue
+			}
+			x := v.Args[1^_i0]
+			v.reset(OpCopy)
+			v.Type = x.Type
+			v.AddArg(x)
+			return true
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
+		break
 	}
 	// match: (Add32 (Const32 [1]) (Com32 x))
 	// result: (Neg32 x)
 	for {
 		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 || v_0.AuxInt != 1 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpConst32 || v_0.AuxInt != 1 {
+				continue
+			}
+			v_1 := v.Args[1^_i0]
+			if v_1.Op != OpCom32 {
+				continue
+			}
+			x := v_1.Args[0]
+			v.reset(OpNeg32)
+			v.AddArg(x)
+			return true
 		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpCom32 {
-			break
-		}
-		x := v_1.Args[0]
-		v.reset(OpNeg32)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 (Com32 x) (Const32 [1]))
-	// result: (Neg32 x)
-	for {
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpCom32 {
-			break
-		}
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 || v_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpNeg32)
-		v.AddArg(x)
-		return true
+		break
 	}
 	// match: (Add32 (Add32 i:(Const32 ) z) x)
 	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
 	// result: (Add32 i (Add32 z x))
-	for {
-		x := v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32 {
-			break
-		}
-		z := v_0.Args[1]
-		i := v_0.Args[0]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(z)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 (Add32 z i:(Const32 )) x)
-	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
-	// result: (Add32 i (Add32 z x))
-	for {
-		x := v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32 {
-			break
-		}
-		_ = v_0.Args[1]
-		z := v_0.Args[0]
-		i := v_0.Args[1]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(z)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 x (Add32 i:(Const32 ) z))
-	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
-	// result: (Add32 i (Add32 z x))
 	for {
 		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpAdd32 {
+				continue
+			}
+			_ = v_0.Args[1]
+			for _i1 := 0; _i1 <= 1; _i1++ {
+				i := v_0.Args[_i1]
+				if i.Op != OpConst32 {
+					continue
+				}
+				t := i.Type
+				z := v_0.Args[1^_i1]
+				x := v.Args[1^_i0]
+				if !(z.Op != OpConst32 && x.Op != OpConst32) {
+					continue
+				}
+				v.reset(OpAdd32)
+				v.AddArg(i)
+				v0 := b.NewValue0(v.Pos, OpAdd32, t)
+				v0.AddArg(z)
+				v0.AddArg(x)
+				v.AddArg(v0)
+				return true
+			}
 		}
-		z := v_1.Args[1]
-		i := v_1.Args[0]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(z)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 x (Add32 z i:(Const32 )))
-	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
-	// result: (Add32 i (Add32 z x))
-	for {
-		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
-		}
-		_ = v_1.Args[1]
-		z := v_1.Args[0]
-		i := v_1.Args[1]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(z)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
+		break
 	}
 	// match: (Add32 (Sub32 i:(Const32 ) z) x)
 	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
 	// result: (Add32 i (Sub32 x z))
-	for {
-		x := v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub32 {
-			break
-		}
-		z := v_0.Args[1]
-		i := v_0.Args[0]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Pos, OpSub32, t)
-		v0.AddArg(x)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Add32 x (Sub32 i:(Const32 ) z))
-	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
-	// result: (Add32 i (Sub32 x z))
 	for {
 		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpSub32 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpSub32 {
+				continue
+			}
+			z := v_0.Args[1]
+			i := v_0.Args[0]
+			if i.Op != OpConst32 {
+				continue
+			}
+			t := i.Type
+			x := v.Args[1^_i0]
+			if !(z.Op != OpConst32 && x.Op != OpConst32) {
+				continue
+			}
+			v.reset(OpAdd32)
+			v.AddArg(i)
+			v0 := b.NewValue0(v.Pos, OpSub32, t)
+			v0.AddArg(x)
+			v0.AddArg(z)
+			v.AddArg(v0)
+			return true
 		}
-		z := v_1.Args[1]
-		i := v_1.Args[0]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Pos, OpSub32, t)
-		v0.AddArg(x)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
+		break
 	}
-	return false
-}
-func rewriteValuegeneric_OpAdd32_20(v *Value) bool {
-	b := v.Block
 	// match: (Add32 (Sub32 z i:(Const32 )) x)
 	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
 	// result: (Sub32 (Add32 x z) i)
-	for {
-		x := v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub32 {
-			break
-		}
-		_ = v_0.Args[1]
-		z := v_0.Args[0]
-		i := v_0.Args[1]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpSub32)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(x)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		v.AddArg(i)
-		return true
-	}
-	// match: (Add32 x (Sub32 z i:(Const32 )))
-	// cond: (z.Op != OpConst32 && x.Op != OpConst32)
-	// result: (Sub32 (Add32 x z) i)
 	for {
 		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpSub32 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpSub32 {
+				continue
+			}
+			_ = v_0.Args[1]
+			z := v_0.Args[0]
+			i := v_0.Args[1]
+			if i.Op != OpConst32 {
+				continue
+			}
+			t := i.Type
+			x := v.Args[1^_i0]
+			if !(z.Op != OpConst32 && x.Op != OpConst32) {
+				continue
+			}
+			v.reset(OpSub32)
+			v0 := b.NewValue0(v.Pos, OpAdd32, t)
+			v0.AddArg(x)
+			v0.AddArg(z)
+			v.AddArg(v0)
+			v.AddArg(i)
+			return true
		}
-		_ = v_1.Args[1]
-		z := v_1.Args[0]
-		i := v_1.Args[1]
-		if i.Op != OpConst32 {
-			break
-		}
-		t := i.Type
-		if !(z.Op != OpConst32 && x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpSub32)
-		v0 := b.NewValue0(v.Pos, OpAdd32, t)
-		v0.AddArg(x)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		v.AddArg(i)
-		return true
+		break
 	}
 	// match: (Add32 (Const32 [c]) (Add32 (Const32 [d]) x))
 	// result: (Add32 (Const32 [int64(int32(c+d))]) x)
 	for {
 		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpConst32 {
+				continue
+			}
+			t := v_0.Type
+			c := v_0.AuxInt
+			v_1 := v.Args[1^_i0]
+			if v_1.Op != OpAdd32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			for _i1 := 0; _i1 <= 1; _i1++ {
+				v_1_0 := v_1.Args[_i1]
+				if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+					continue
+				}
+				d := v_1_0.AuxInt
+				x := v_1.Args[1^_i1]
+				v.reset(OpAdd32)
+				v0 := b.NewValue0(v.Pos, OpConst32, t)
+				v0.AuxInt = int64(int32(c + d))
+				v.AddArg(v0)
+				v.AddArg(x)
+				return true
+			}
 		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
-		}
-		x := v_1.Args[1]
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 || v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Pos, OpConst32, t)
-		v0.AuxInt = int64(int32(c + d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 (Const32 [c]) (Add32 x (Const32 [d])))
-	// result: (Add32 (Const32 [int64(int32(c+d))]) x)
-	for {
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
-		}
-		_ = v_1.Args[1]
-		x := v_1.Args[0]
-		v_1_1 := v_1.Args[1]
-		if v_1_1.Op != OpConst32 || v_1_1.Type != t {
-			break
-		}
-		d := v_1_1.AuxInt
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Pos, OpConst32, t)
-		v0.AuxInt = int64(int32(c + d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 (Add32 (Const32 [d]) x) (Const32 [c]))
-	// result: (Add32 (Const32 [int64(int32(c+d))]) x)
-	for {
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32 {
-			break
-		}
-		x := v_0.Args[1]
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst32 {
-			break
-		}
-		t := v_0_0.Type
-		d := v_0_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 || v_1.Type != t {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Pos, OpConst32, t)
-		v0.AuxInt = int64(int32(c + d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 (Add32 x (Const32 [d])) (Const32 [c]))
-	// result: (Add32 (Const32 [int64(int32(c+d))]) x)
-	for {
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32 {
-			break
-		}
-		_ = v_0.Args[1]
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst32 {
-			break
-		}
-		t := v_0_1.Type
-		d := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 || v_1.Type != t {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Pos, OpConst32, t)
-		v0.AuxInt = int64(int32(c + d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
+		break
 	}
 	// match: (Add32 (Const32 [c]) (Sub32 (Const32 [d]) x))
 	// result: (Sub32 (Const32 [int64(int32(c+d))]) x)
 	for {
 		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			v_0 := v.Args[_i0]
+			if v_0.Op != OpConst32 {
+				continue
+			}
+			t := v_0.Type
+			c := v_0.AuxInt
+			v_1 := v.Args[1^_i0]
+			if v_1.Op != OpSub32 {
+				continue
+			}
+			x := v_1.Args[1]
+			v_1_0 := v_1.Args[0]
+			if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+				continue
+			}
+			d := v_1_0.AuxInt
+			v.reset(OpSub32)
+			v0 := b.NewValue0(v.Pos, OpConst32, t)
+			v0.AuxInt = int64(int32(c + d))
+			v.AddArg(v0)
+			v.AddArg(x)
+			return true
 		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpSub32 {
-			break
-		}
-		x := v_1.Args[1]
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 || v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		v.reset(OpSub32)
-		v0 := b.NewValue0(v.Pos, OpConst32, t)
-		v0.AuxInt = int64(int32(c + d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 (Sub32
(Const32 [d]) x) (Const32 [c])) - // result: (Sub32 (Const32 [int64(int32(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add32 (Const32 [c]) (Sub32 x (Const32 [d]))) // result: (Add32 (Const32 [int64(int32(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub32 { + continue + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 || v_1_1.Type != t { + continue + } + d := v_1_1.AuxInt + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add32 (Sub32 x (Const32 [d])) (Const32 [c])) - // result: (Add32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -2008,37 +1050,22 @@ func rewriteValuegeneric_OpAdd32F_0(v *Value) bool { // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F { + continue + } + d := v_1.AuxInt + v.reset(OpConst32F) + v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - d := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d)) - return true - } - // match: (Add32F (Const32F [d]) (Const32F [c])) - // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - c := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d)) - return true + break } return false } @@ -2048,762 +1075,283 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // result: (Const64 [c+d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if 
v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConst64) + v.AuxInt = c + d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c + d - return true - } - // match: (Add64 (Const64 [d]) (Const64 [c])) - // result: (Const64 [c+d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c + d - return true + break } // match: (Add64 (Mul64 x y) (Mul64 x z)) // result: (Mul64 x (Add64 y z)) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMul64 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + y := v_0.Args[1^_i1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + if x != v_1.Args[_i2] { + continue + } + z := v_1.Args[1^_i2] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } - // match: (Add64 (Mul64 y x) (Mul64 x z)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Mul64 x y) (Mul64 z x)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Mul64 y x) (Mul64 z x)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Mul64 x z) (Mul64 x y)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - z := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Mul64 z x) (Mul64 x y)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpMul64 { - break - } - x := v_0.Args[1] - z := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Mul64 x z) (Mul64 y x)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - z := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Mul64 z x) (Mul64 y x)) - // result: (Mul64 x (Add64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[1] - z := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpAdd64_10(v *Value) bool { - b := v.Block // match: (Add64 (Const64 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add64 x (Const64 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Add64 (Const64 [1]) (Com64 x)) // result: (Neg64 x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 1 { + continue + } + v_1 := v.Args[1^_i0] + if v_1.Op != OpCom64 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg64) + v.AddArg(x) + return true } - v_1 := v.Args[1] - if v_1.Op != OpCom64 { - break - } - x := v_1.Args[0] - v.reset(OpNeg64) - v.AddArg(x) - return true - } - // match: (Add64 (Com64 x) (Const64 [1])) - // result: (Neg64 x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpCom64 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 1 { - break - } - v.reset(OpNeg64) - v.AddArg(x) - return true + break } // match: (Add64 (Add64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Add64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add64 (Add64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Add64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - _ = v_0.Args[1] - z 
:= v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add64 x (Add64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Add64 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAdd64 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAdd64) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add64 x (Add64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Add64 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Add64 (Sub64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Sub64 x z)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 x (Sub64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Sub64 x z)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpSub64 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst64 { + continue + } + t := i.Type + x := v.Args[1^_i0] + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAdd64) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg(x) + v0.AddArg(z) + v.AddArg(v0) + return true } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true + break } - return false -} -func rewriteValuegeneric_OpAdd64_20(v *Value) bool { - b := v.Block // match: (Add64 (Sub64 z i:(Const64 )) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Sub64 (Add64 x z) i) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - 
if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add64 x (Sub64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Sub64 (Add64 x z) i) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpSub64 { + continue + } + _ = v_0.Args[1] + z := v_0.Args[0] + i := v_0.Args[1] + if i.Op != OpConst64 { + continue + } + t := i.Type + x := v.Args[1^_i0] + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(x) + v0.AddArg(z) + v.AddArg(v0) + v.AddArg(i) + return true } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true + break } // match: (Add64 (Const64 [c]) (Add64 (Const64 [d]) x)) // result: (Add64 (Const64 [c+d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Const64 [c]) (Add64 x (Const64 [d]))) - // result: (Add64 (Const64 [c+d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Add64 (Const64 [d]) x) (Const64 [c])) - // result: (Add64 (Const64 [c+d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Add64 x (Const64 [d])) (Const64 [c])) - // result: (Add64 (Const64 [c+d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := 
v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) // result: (Sub64 (Const64 [c+d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub64 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + d + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Sub64 (Const64 [d]) x) (Const64 [c])) - // result: (Sub64 (Const64 [c+d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add64 (Const64 [c]) (Sub64 x (Const64 [d]))) // result: (Add64 (Const64 [c-d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub64 { + continue + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != t { + continue + } + d := v_1_1.AuxInt + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - d + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Sub64 x (Const64 [d])) (Const64 [c])) - // result: (Add64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -2812,37 +1360,22 @@ func rewriteValuegeneric_OpAdd64F_0(v *Value) bool { // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op 
!= OpConst64F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F { + continue + } + d := v_1.AuxInt + v.reset(OpConst64F) + v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - d := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d)) - return true - } - // match: (Add64F (Const64F [d]) (Const64F [c])) - // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - c := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d)) - return true + break } return false } @@ -2852,762 +1385,283 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // result: (Const8 [int64(int8(c+d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + v.reset(OpConst8) + v.AuxInt = int64(int8(c + d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c + d)) - return true - } - // match: (Add8 (Const8 [d]) (Const8 [c])) - // result: (Const8 [int64(int8(c+d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c + d)) - return true + break } // match: (Add8 (Mul8 x y) (Mul8 x z)) // result: (Mul8 x (Add8 y z)) for { t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMul8 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + y := v_0.Args[1^_i1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + if x != v_1.Args[_i2] { + continue + } + z := v_1.Args[1^_i2] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + } } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } - // match: (Add8 (Mul8 y x) (Mul8 x z)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Mul8 x y) (Mul8 z x)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, 
OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Mul8 y x) (Mul8 z x)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Mul8 x z) (Mul8 x y)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - z := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Mul8 z x) (Mul8 x y)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - z := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Mul8 x z) (Mul8 y x)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - z := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Mul8 z x) (Mul8 y x)) - // result: (Mul8 x (Add8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - z := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpAdd8_10(v *Value) bool { - b := v.Block // match: (Add8 (Const8 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add8 x (Const8 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Add8 (Const8 [1]) (Com8 x)) // result: (Neg8 x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 1 { + continue + } + v_1 := v.Args[1^_i0] + if v_1.Op != OpCom8 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg8) + v.AddArg(x) + return true } - v_1 := v.Args[1] - if v_1.Op != OpCom8 { - break - } - 
x := v_1.Args[0] - v.reset(OpNeg8) - v.AddArg(x) - return true - } - // match: (Add8 (Com8 x) (Const8 [1])) - // result: (Neg8 x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpCom8 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 1 { - break - } - v.reset(OpNeg8) - v.AddArg(x) - return true + break } // match: (Add8 (Add8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Add8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add8 (Add8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Add8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add8 x (Add8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Add8 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAdd8 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAdd8) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add8 x (Add8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Add8 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Add8 (Sub8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Sub8 x z)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 x (Sub8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Sub8 x z)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 
:= v.Args[_i0] + if v_0.Op != OpSub8 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst8 { + continue + } + t := i.Type + x := v.Args[1^_i0] + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAdd8) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg(x) + v0.AddArg(z) + v.AddArg(v0) + return true } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true + break } - return false -} -func rewriteValuegeneric_OpAdd8_20(v *Value) bool { - b := v.Block // match: (Add8 (Sub8 z i:(Const8 )) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Sub8 (Add8 x z) i) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add8 x (Sub8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Sub8 (Add8 x z) i) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpSub8 { + continue + } + _ = v_0.Args[1] + z := v_0.Args[0] + i := v_0.Args[1] + if i.Op != OpConst8 { + continue + } + t := i.Type + x := v.Args[1^_i0] + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(x) + v0.AddArg(z) + v.AddArg(v0) + v.AddArg(i) + return true } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true + break } // match: (Add8 (Const8 [c]) (Add8 (Const8 [d]) x)) // result: (Add8 (Const8 [int64(int8(c+d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c + d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Const8 [c]) (Add8 x (Const8 [d]))) - // result: (Add8 (Const8 [int64(int8(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != 
OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Add8 (Const8 [d]) x) (Const8 [c])) - // result: (Add8 (Const8 [int64(int8(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Add8 x (Const8 [d])) (Const8 [c])) - // result: (Add8 (Const8 [int64(int8(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // result: (Sub8 (Const8 [int64(int8(c+d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub8 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c + d)) + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Sub8 (Const8 [d]) x) (Const8 [c])) - // result: (Sub8 (Const8 [int64(int8(c+d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Add8 (Const8 [c]) (Sub8 x (Const8 [d]))) // result: (Add8 (Const8 [int64(int8(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpSub8 { + continue + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 || v_1_1.Type != t { + continue + } + d := v_1_1.AuxInt + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - _ = v_1.Args[1] - 
x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Sub8 x (Const8 [d])) (Const8 [c])) - // result: (Add8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -3649,149 +1703,87 @@ func rewriteValuegeneric_OpAddPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpAnd16_0(v *Value) bool { + b := v.Block // match: (And16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c&d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + v.reset(OpConst16) + v.AuxInt = int64(int16(c & d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c & d)) - return true - } - // match: (And16 (Const16 [d]) (Const16 [c])) - // result: (Const16 [int64(int16(c&d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c & d)) - return true + break } // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) // cond: c >= 64-ntz(m) // result: (Const16 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-ntz(m)) { + continue + } + v.reset(OpConst16) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpRsh16Ux64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true - } - // match: (And16 (Rsh16Ux64 _ (Const64 [c])) (Const16 [m])) - // cond: c >= 64-ntz(m) - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpRsh16Ux64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - m := v_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true + break } // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) // cond: c >= 64-nlz(m) // result: (Const16 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + m := v_0.AuxInt + v_1 := 
v.Args[1^_i0] + if v_1.Op != OpLsh16x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-nlz(m)) { + continue + } + v.reset(OpConst16) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpLsh16x64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true - } - // match: (And16 (Lsh16x64 _ (Const64 [c])) (Const16 [m])) - // cond: c >= 64-nlz(m) - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh16x64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - m := v_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true + break } // match: (And16 x x) // result: x @@ -3807,499 +1799,210 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { } // match: (And16 (Const16 [-1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (And16 x (Const16 [-1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (And16 (Const16 [0]) _) // result: (Const16 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst16) + v.AuxInt = 0 + return true } - v.reset(OpConst16) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValuegeneric_OpAnd16_10(v *Value) bool { - b := v.Block - // match: (And16 _ (Const16 [0])) - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true + break } // match: (And16 x (And16 x y)) // result: (And16 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpAnd16) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpAnd16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And16 x (And16 y x)) - // result: (And16 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpAnd16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And16 (And16 x y) x) - // result: (And16 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpAnd16) - v.AddArg(x) - v.AddArg(y) 
- return true - } - // match: (And16 (And16 y x) x) - // result: (And16 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpAnd16) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (And16 (And16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (And16 i (And16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And16 (And16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (And16 i (And16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And16 x (And16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (And16 i (And16 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd16 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpAnd16) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And16 x (And16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (And16 i (And16 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (And16 (Const16 [c]) (And16 (Const16 [d]) x)) // result: (And16 (Const16 [int64(int16(c&d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c & d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := 
v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAnd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd16_20(v *Value) bool { - b := v.Block - // match: (And16 (Const16 [c]) (And16 x (Const16 [d]))) - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And16 (And16 (Const16 [d]) x) (Const16 [c])) - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And16 (And16 x (Const16 [d])) (Const16 [c])) - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } func rewriteValuegeneric_OpAnd32_0(v *Value) bool { + b := v.Block // match: (And32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c&d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(c & d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c & d)) - return true - } - // match: (And32 (Const32 [d]) (Const32 [c])) - // result: (Const32 [int64(int32(c&d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c & d)) - return true + break } // match: (And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) // cond: c >= 64-ntz(m) // result: (Const32 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { 
+ continue + } + c := v_1_1.AuxInt + if !(c >= 64-ntz(m)) { + continue + } + v.reset(OpConst32) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpRsh32Ux64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } - // match: (And32 (Rsh32Ux64 _ (Const64 [c])) (Const32 [m])) - // cond: c >= 64-ntz(m) - // result: (Const32 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpRsh32Ux64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - m := v_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true + break } // match: (And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) // cond: c >= 64-nlz(m) // result: (Const32 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh32x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-nlz(m)) { + continue + } + v.reset(OpConst32) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpLsh32x64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } - // match: (And32 (Lsh32x64 _ (Const64 [c])) (Const32 [m])) - // cond: c >= 64-nlz(m) - // result: (Const32 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh32x64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - m := v_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true + break } // match: (And32 x x) // result: x @@ -4315,499 +2018,210 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { } // match: (And32 (Const32 [-1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (And32 x (Const32 [-1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (And32 (Const32 [0]) _) // result: (Const32 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst32) + v.AuxInt = 0 + return true } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValuegeneric_OpAnd32_10(v *Value) bool { - b := v.Block - // match: (And32 _ (Const32 [0])) - // result: (Const32 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt 
!= 0 { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true + break } // match: (And32 x (And32 x y)) // result: (And32 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpAnd32) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpAnd32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And32 x (And32 y x)) - // result: (And32 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpAnd32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And32 (And32 x y) x) - // result: (And32 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpAnd32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And32 (And32 y x) x) - // result: (And32 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpAnd32) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (And32 (And32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (And32 i (And32 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And32 (And32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (And32 i (And32 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And32 x (And32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (And32 i (And32 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd32 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpAnd32) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And32 x (And32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != 
OpConst32) - // result: (And32 i (And32 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (And32 (Const32 [c]) (And32 (Const32 [d]) x)) // result: (And32 (Const32 [int64(int32(c&d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c & d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd32_20(v *Value) bool { - b := v.Block - // match: (And32 (Const32 [c]) (And32 x (Const32 [d]))) - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And32 (And32 (Const32 [d]) x) (Const32 [c])) - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And32 (And32 x (Const32 [d])) (Const32 [c])) - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } func rewriteValuegeneric_OpAnd64_0(v *Value) bool { + b := v.Block // match: (And64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c&d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := 
v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConst64) + v.AuxInt = c & d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c & d - return true - } - // match: (And64 (Const64 [d]) (Const64 [c])) - // result: (Const64 [c&d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c & d - return true + break } // match: (And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) // cond: c >= 64-ntz(m) // result: (Const64 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-ntz(m)) { + continue + } + v.reset(OpConst64) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpRsh64Ux64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true - } - // match: (And64 (Rsh64Ux64 _ (Const64 [c])) (Const64 [m])) - // cond: c >= 64-ntz(m) - // result: (Const64 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpRsh64Ux64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - m := v_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true + break } // match: (And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) // cond: c >= 64-nlz(m) // result: (Const64 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh64x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-nlz(m)) { + continue + } + v.reset(OpConst64) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpLsh64x64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true - } - // match: (And64 (Lsh64x64 _ (Const64 [c])) (Const64 [m])) - // cond: c >= 64-nlz(m) - // result: (Const64 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh64x64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - m := v_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true + break } // match: (And64 x x) // result: x @@ -4823,499 +2237,210 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { } // match: (And64 (Const64 [-1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != -1 { - break - } 
- v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (And64 x (Const64 [-1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (And64 (Const64 [0]) _) // result: (Const64 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst64) + v.AuxInt = 0 + return true } - v.reset(OpConst64) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValuegeneric_OpAnd64_10(v *Value) bool { - b := v.Block - // match: (And64 _ (Const64 [0])) - // result: (Const64 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true + break } // match: (And64 x (And64 x y)) // result: (And64 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpAnd64) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpAnd64) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And64 x (And64 y x)) - // result: (And64 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpAnd64) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And64 (And64 x y) x) - // result: (And64 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpAnd64) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And64 (And64 y x) x) - // result: (And64 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpAnd64) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (And64 (And64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (And64 i (And64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And64 (And64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (And64 i (And64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) 
- v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And64 x (And64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (And64 i (And64 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd64 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAnd64) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And64 x (And64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (And64 i (And64 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (And64 (Const64 [c]) (And64 (Const64 [d]) x)) // result: (And64 (Const64 [c&d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c & d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd64_20(v *Value) bool { - b := v.Block - // match: (And64 (Const64 [c]) (And64 x (Const64 [d]))) - // result: (And64 (Const64 [c&d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And64 (And64 (Const64 [d]) x) (Const64 [c])) - // result: (And64 (Const64 [c&d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { 
- break - } - c := v_1.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And64 (And64 x (Const64 [d])) (Const64 [c])) - // result: (And64 (Const64 [c&d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } func rewriteValuegeneric_OpAnd8_0(v *Value) bool { + b := v.Block // match: (And8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c&d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + v.reset(OpConst8) + v.AuxInt = int64(int8(c & d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c & d)) - return true - } - // match: (And8 (Const8 [d]) (Const8 [c])) - // result: (Const8 [int64(int8(c&d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c & d)) - return true + break } // match: (And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) // cond: c >= 64-ntz(m) // result: (Const8 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpRsh8Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-ntz(m)) { + continue + } + v.reset(OpConst8) + v.AuxInt = 0 + return true } - m := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpRsh8Ux64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true - } - // match: (And8 (Rsh8Ux64 _ (Const64 [c])) (Const8 [m])) - // cond: c >= 64-ntz(m) - // result: (Const8 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpRsh8Ux64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - m := v_1.AuxInt - if !(c >= 64-ntz(m)) { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true + break } // match: (And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) // cond: c >= 64-nlz(m) // result: (Const8 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + m := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh8x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := v_1_1.AuxInt + if !(c >= 64-nlz(m)) { + continue + } + v.reset(OpConst8) + v.AuxInt = 0 + return true } - m := 
v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpLsh8x64 { - break - } - _ = v_1.Args[1] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true - } - // match: (And8 (Lsh8x64 _ (Const64 [c])) (Const8 [m])) - // cond: c >= 64-nlz(m) - // result: (Const8 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh8x64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - m := v_1.AuxInt - if !(c >= 64-nlz(m)) { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true + break } // match: (And8 x x) // result: x @@ -5331,352 +2456,125 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { } // match: (And8 (Const8 [-1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (And8 x (Const8 [-1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (And8 (Const8 [0]) _) // result: (Const8 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst8) + v.AuxInt = 0 + return true } - v.reset(OpConst8) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValuegeneric_OpAnd8_10(v *Value) bool { - b := v.Block - // match: (And8 _ (Const8 [0])) - // result: (Const8 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true + break } // match: (And8 x (And8 x y)) // result: (And8 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpAnd8) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpAnd8) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And8 x (And8 y x)) - // result: (And8 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpAnd8) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And8 (And8 x y) x) - // result: (And8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpAnd8) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (And8 (And8 y x) x) - // result: (And8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpAnd8) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: 
(And8 (And8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (And8 i (And8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And8 (And8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (And8 i (And8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And8 x (And8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (And8 i (And8 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd8 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAnd8) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And8 x (And8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (And8 i (And8 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (And8 (Const8 [c]) (And8 (Const8 [d]) x)) // result: (And8 (Const8 [int64(int8(c&d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAnd8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c & d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd8_20(v *Value) bool 
{ - b := v.Block - // match: (And8 (Const8 [c]) (And8 x (Const8 [d]))) - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And8 (And8 (Const8 [d]) x) (Const8 [c])) - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And8 (And8 x (Const8 [d])) (Const8 [c])) - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -5739,36 +2637,23 @@ func rewriteValuegeneric_OpCom16_0(v *Value) bool { } // match: (Com16 (Add16 (Const16 [-1]) x)) // result: (Neg16 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 || v_0_0.AuxInt != -1 { - break - } - v.reset(OpNeg16) - v.AddArg(x) - return true - } - // match: (Com16 (Add16 x (Const16 [-1]))) - // result: (Neg16 x) for { v_0 := v.Args[0] if v_0.Op != OpAdd16 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst16 || v_0_0.AuxInt != -1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpNeg16) + v.AddArg(x) + return true } - v.reset(OpNeg16) - v.AddArg(x) - return true + break } return false } @@ -5800,36 +2685,23 @@ func rewriteValuegeneric_OpCom32_0(v *Value) bool { } // match: (Com32 (Add32 (Const32 [-1]) x)) // result: (Neg32 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 || v_0_0.AuxInt != -1 { - break - } - v.reset(OpNeg32) - v.AddArg(x) - return true - } - // match: (Com32 (Add32 x (Const32 [-1]))) - // result: (Neg32 x) for { v_0 := v.Args[0] if v_0.Op != OpAdd32 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst32 || v_0_0.AuxInt != -1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpNeg32) + v.AddArg(x) + return true } - v.reset(OpNeg32) - v.AddArg(x) - return true + break } return false } @@ -5861,36 +2733,23 @@ func rewriteValuegeneric_OpCom64_0(v *Value) bool { } // match: (Com64 (Add64 (Const64 [-1]) x)) // result: (Neg64 x) 
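The Com16/Com32 hunks above and the Com64/Com8 hunks that continue below fold (ComN (AddN (ConstN [-1]) x)) into (NegN x), with the commute loop covering the argument-swapped form. The rewrite rests on the two's-complement identity ^(x - 1) == -x, since ^y == -y - 1. A quick standalone check, not part of the patch:

package main

import "fmt"

// In two's complement, ^y == -y-1, so ^(x + (-1)) == -(x-1)-1 == -x,
// which is the identity behind (Com64 (Add64 (Const64 [-1]) x)) => (Neg64 x).
func main() {
	for _, x := range []int64{0, 1, -1, 42, -1 << 62} {
		com := ^(x + -1) // Com64 (Add64 (Const64 [-1]) x)
		neg := -x        // Neg64 x
		fmt.Println(x, com == neg) // prints true for every x
	}
}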
- for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 || v_0_0.AuxInt != -1 { - break - } - v.reset(OpNeg64) - v.AddArg(x) - return true - } - // match: (Com64 (Add64 x (Const64 [-1]))) - // result: (Neg64 x) for { v_0 := v.Args[0] if v_0.Op != OpAdd64 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst64 || v_0_0.AuxInt != -1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpNeg64) + v.AddArg(x) + return true } - v.reset(OpNeg64) - v.AddArg(x) - return true + break } return false } @@ -5922,36 +2781,23 @@ func rewriteValuegeneric_OpCom8_0(v *Value) bool { } // match: (Com8 (Add8 (Const8 [-1]) x)) // result: (Neg8 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 || v_0_0.AuxInt != -1 { - break - } - v.reset(OpNeg8) - v.AddArg(x) - return true - } - // match: (Com8 (Add8 x (Const8 [-1]))) - // result: (Neg8 x) for { v_0 := v.Args[0] if v_0.Op != OpAdd8 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 || v_0_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst8 || v_0_0.AuxInt != -1 { + continue + } + x := v_0.Args[1^_i0] + v.reset(OpNeg8) + v.AddArg(x) + return true } - v.reset(OpNeg8) - v.AddArg(x) - return true + break } return false } @@ -6091,29 +2937,6 @@ func rewriteValuegeneric_OpConstString_0(v *Value) bool { func rewriteValuegeneric_OpConvert_0(v *Value) bool { // match: (Convert (Add64 (Convert ptr mem) off) mem) // result: (Add64 ptr off) - for { - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - off := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConvert { - break - } - _ = v_0_0.Args[1] - ptr := v_0_0.Args[0] - if mem != v_0_0.Args[1] { - break - } - v.reset(OpAdd64) - v.AddArg(ptr) - v.AddArg(off) - return true - } - // match: (Convert (Add64 off (Convert ptr mem)) mem) - // result: (Add64 ptr off) for { mem := v.Args[1] v_0 := v.Args[0] @@ -6121,46 +2944,26 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool { break } _ = v_0.Args[1] - off := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConvert { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConvert { + continue + } + _ = v_0_0.Args[1] + ptr := v_0_0.Args[0] + if mem != v_0_0.Args[1] { + continue + } + off := v_0.Args[1^_i0] + v.reset(OpAdd64) + v.AddArg(ptr) + v.AddArg(off) + return true } - _ = v_0_1.Args[1] - ptr := v_0_1.Args[0] - if mem != v_0_1.Args[1] { - break - } - v.reset(OpAdd64) - v.AddArg(ptr) - v.AddArg(off) - return true + break } // match: (Convert (Add32 (Convert ptr mem) off) mem) // result: (Add32 ptr off) - for { - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - off := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConvert { - break - } - _ = v_0_0.Args[1] - ptr := v_0_0.Args[0] - if mem != v_0_0.Args[1] { - break - } - v.reset(OpAdd32) - v.AddArg(ptr) - v.AddArg(off) - return true - } - // match: (Convert (Add32 off (Convert ptr mem)) mem) - // result: (Add32 ptr off) for { mem := v.Args[1] v_0 := v.Args[0] @@ -6168,20 +2971,23 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool { break } _ = v_0.Args[1] - off := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != 
OpConvert { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConvert { + continue + } + _ = v_0_0.Args[1] + ptr := v_0_0.Args[0] + if mem != v_0_0.Args[1] { + continue + } + off := v_0.Args[1^_i0] + v.reset(OpAdd32) + v.AddArg(ptr) + v.AddArg(off) + return true } - _ = v_0_1.Args[1] - ptr := v_0_1.Args[0] - if mem != v_0_1.Args[1] { - break - } - v.reset(OpAdd32) - v.AddArg(ptr) - v.AddArg(off) - return true + break } // match: (Convert (Convert ptr mem) mem) // result: ptr @@ -7921,3658 +4727,666 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // result: (Eq16 (Const16 [int64(int16(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq16 (Const16 [c]) (Add16 x (Const16 [d]))) - // result: (Eq16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq16 (Add16 (Const16 [d]) x) (Const16 [c])) - // result: (Eq16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq16 (Add16 x (Const16 [d])) (Const16 [c])) - // result: (Eq16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Eq16 (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] 
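The Eq16 divisibility rules that begin just below (the Mod16u/Mod16 rewrites and the Mul16 matches guarded by umagic and udivisible) turn x % c == 0 into a multiply, rotate, and unsigned compare. For an odd 16-bit divisor d the underlying fact is that x is divisible by d exactly when uint16(x*m) <= (1<<16-1)/d, where m is the multiplicative inverse of d modulo 1<<16; the RotateLeft16 by 16-udivisible(16,c).k in the generated result extends this to even divisors (c = d<<k) by rotating the k low bits, which must be zero, into the top of the compared value. A brute-force sanity check for one odd divisor, independent of the compiler's udivisible helper and using an illustrative inverse routine:

package main

import "fmt"

// modInverse16 computes the multiplicative inverse of an odd d modulo 1<<16
// by Newton iteration; each step doubles the number of correct low bits.
func modInverse16(d uint16) uint16 {
	x := d // correct to 3 bits for odd d, since d*d == 1 (mod 8)
	for i := 0; i < 4; i++ {
		x *= 2 - d*x
	}
	return x
}

func main() {
	const d = 7
	m := modInverse16(d)
	limit := uint16((1<<16 - 1) / d)
	for x := 0; x < 1<<16; x++ {
		got := uint16(x)*m <= limit // multiply-and-compare form
		want := x%d == 0            // plain remainder check
		if got != want {
			fmt.Println("mismatch at", x)
			return
		}
	}
	fmt.Println("x %", d, "== 0  <=>  uint16(x*m) <=", limit, "for all 16-bit x, m =", m)
}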
+ if v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (Eq16 (Const16 [d]) (Const16 [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } // match: (Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) // cond: x.Op != OpConst16 && udivisibleOK(16,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32u (ZeroExt16to32 x) (Const32 [c&0xffff])) (Const32 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMod16u { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMod16u { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst16 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && udivisibleOK(16, c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = c & 0xffff + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = 0 + v.AddArg(v3) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && udivisibleOK(16, c) && !hasSmallRotate(config)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = c & 0xffff - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = 0 - v.AddArg(v3) - return true - } - // match: (Eq16 (Const16 [0]) (Mod16u x (Const16 [c]))) - // cond: x.Op != OpConst16 && udivisibleOK(16,c) && !hasSmallRotate(config) - // result: (Eq32 (Mod32u (ZeroExt16to32 x) (Const32 [c&0xffff])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 0 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpMod16u { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(x.Op != OpConst16 && udivisibleOK(16, c) && !hasSmallRotate(config)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = c & 0xffff - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = 0 - v.AddArg(v3) - return true + break } // match: (Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) // cond: x.Op != OpConst16 && sdivisibleOK(16,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32 (SignExt16to32 x) (Const32 [c])) (Const32 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMod16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + 
if v_0.Op != OpMod16 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst16 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && sdivisibleOK(16, c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v2.AuxInt = c + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v3.AuxInt = 0 + v.AddArg(v3) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc64to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int64(int16(udivisible(16, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int64(16 - udivisible(16, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int64(int16(udivisible(16, c).max)) + v.AddArg(v4) + return true + } + } } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && sdivisibleOK(16, c) && !hasSmallRotate(config)) { - break + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := 
v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc32to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int64(int16(udivisible(16, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int64(16 - udivisible(16, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int64(int16(udivisible(16, c).max)) + v.AddArg(v4) + return true + } + } } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) - v2.AuxInt = c - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) - v3.AuxInt = 0 - v.AddArg(v3) - return true + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc32to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpRsh32Ux64 { + continue + } + _ = mul_1.Args[1] + mul_1_0 := mul_1.Args[0] + if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { + continue + } + mul_1_1 := mul_1.Args[1] + if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, 
c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int64(int16(udivisible(16, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int64(16 - udivisible(16, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int64(int16(udivisible(16, c).max)) + v.AddArg(v4) + return true + } + } + } + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc32to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAvg32u { + continue + } + _ = v_1_1_0_0.Args[1] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpLsh32x64 { + continue + } + _ = v_1_1_0_0_0.Args[1] + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] { + continue + } + v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] + if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 16 { + continue + } + mul := v_1_1_0_0.Args[1] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int64(int16(udivisible(16, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int64(16 - udivisible(16, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int64(int16(udivisible(16, c).max)) + v.AddArg(v4) + return true + } + } + } + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) + // result: (Leq16U (RotateLeft16 
(Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) )
+	for {
+		_ = v.Args[1]
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			x := v.Args[_i0]
+			v_1 := v.Args[1^_i0]
+			if v_1.Op != OpMul16 {
+				continue
+			}
+			_ = v_1.Args[1]
+			for _i1 := 0; _i1 <= 1; _i1++ {
+				v_1_0 := v_1.Args[_i1]
+				if v_1_0.Op != OpConst16 {
+					continue
+				}
+				c := v_1_0.AuxInt
+				v_1_1 := v_1.Args[1^_i1]
+				if v_1_1.Op != OpSub16 {
+					continue
+				}
+				_ = v_1_1.Args[1]
+				v_1_1_0 := v_1_1.Args[0]
+				if v_1_1_0.Op != OpRsh32x64 {
+					continue
+				}
+				_ = v_1_1_0.Args[1]
+				mul := v_1_1_0.Args[0]
+				if mul.Op != OpMul32 {
+					continue
+				}
+				_ = mul.Args[1]
+				for _i2 := 0; _i2 <= 1; _i2++ {
+					mul_0 := mul.Args[_i2]
+					if mul_0.Op != OpConst32 {
+						continue
+					}
+					m := mul_0.AuxInt
+					mul_1 := mul.Args[1^_i2]
+					if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] {
+						continue
+					}
+					v_1_1_0_1 := v_1_1_0.Args[1]
+					if v_1_1_0_1.Op != OpConst64 {
+						continue
+					}
+					s := v_1_1_0_1.AuxInt
+					v_1_1_1 := v_1_1.Args[1]
+					if v_1_1_1.Op != OpRsh32x64 {
+						continue
+					}
+					_ = v_1_1_1.Args[1]
+					v_1_1_1_0 := v_1_1_1.Args[0]
+					if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] {
+						continue
+					}
+					v_1_1_1_1 := v_1_1_1.Args[1]
+					if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) {
+						continue
+					}
+					v.reset(OpLeq16U)
+					v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+					v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16)
+					v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+					v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+					v3.AuxInt = int64(int16(sdivisible(16, c).m))
+					v2.AddArg(v3)
+					v2.AddArg(x)
+					v1.AddArg(v2)
+					v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+					v4.AuxInt = int64(int16(sdivisible(16, c).a))
+					v1.AddArg(v4)
+					v0.AddArg(v1)
+					v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+					v5.AuxInt = int64(16 - sdivisible(16, c).k)
+					v0.AddArg(v5)
+					v.AddArg(v0)
+					v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+					v6.AuxInt = int64(int16(sdivisible(16, c).max))
+					v.AddArg(v6)
+					return true
+				}
+			}
+		}
+		break
 	}
 	return false
 }
 func rewriteValuegeneric_OpEq16_10(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	typ := &b.Func.Config.Types
-	// match: (Eq16 (Const16 [0]) (Mod16 x (Const16 [c])))
-	// cond: x.Op != OpConst16 && sdivisibleOK(16,c) && !hasSmallRotate(config)
-	// result: (Eq32 (Mod32 (SignExt16to32 x) (Const32 [c])) (Const32 [0]))
-	for {
-		_ = v.Args[1]
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 || v_0.AuxInt != 0 {
-			break
-		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpMod16 {
-			break
-		}
-		_ = v_1.Args[1]
-		x := v_1.Args[0]
-		v_1_1 := v_1.Args[1]
-		if v_1_1.Op != OpConst16 {
-			break
-		}
-		c := v_1_1.AuxInt
-		if !(x.Op != OpConst16 && sdivisibleOK(16, c) && !hasSmallRotate(config)) {
-			break
-		}
-		v.reset(OpEq32)
-		v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
-		v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
-		v2.AuxInt = c
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		return true
-	}
-	// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s])))))
-	// cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s ==
16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to16 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (ZeroExt16to64 x) (Const64 [m])) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to16 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - 
return true - } - // match: (Eq16 x (Mul16 (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (ZeroExt16to64 x) (Const64 [m])) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - 
v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (ZeroExt16to64 x) (Const64 [m])) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, 
OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (ZeroExt16to64 x) (Const64 [m])) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if 
!(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) {
-			break
-		}
-		v.reset(OpLeq16U)
-		v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
-		v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
-		v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
-		v2.AuxInt = int64(int16(udivisible(16, c).m))
-		v1.AddArg(v2)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
-		v3.AuxInt = int64(16 - udivisible(16, c).k)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
-		v4.AuxInt = int64(int16(udivisible(16, c).max))
-		v.AddArg(v4)
-		return true
-	}
-	// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s])))))
-	// cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c)
-	// result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) )
-	for {
-		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul16 {
-			break
-		}
-		_ = v_1.Args[1]
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst16 {
-			break
-		}
-		c := v_1_0.AuxInt
-		v_1_1 := v_1.Args[1]
-		if v_1_1.Op != OpTrunc32to16 {
-			break
-		}
-		v_1_1_0 := v_1_1.Args[0]
-		if v_1_1_0.Op != OpRsh32Ux64 {
-			break
-		}
-		_ = v_1_1_0.Args[1]
-		mul := v_1_1_0.Args[0]
-		if mul.Op != OpMul32 {
-			break
-		}
-		_ = mul.Args[1]
-		mul_0 := mul.Args[0]
-		if mul_0.Op != OpConst32 {
-			break
-		}
-		m := mul_0.AuxInt
-		mul_1 := mul.Args[1]
-		if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
-			break
-		}
-		v_1_1_0_1 := v_1_1_0.Args[1]
-		if v_1_1_0_1.Op != OpConst64 {
-			break
-		}
-		s := v_1_1_0_1.AuxInt
-		if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) {
-			break
-		}
-		v.reset(OpLeq16U)
-		v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
-		v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
-		v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
-		v2.AuxInt = int64(int16(udivisible(16, c).m))
-		v1.AddArg(v2)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
-		v3.AuxInt = int64(16 - udivisible(16, c).k)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
-		v4.AuxInt = int64(int16(udivisible(16, c).max))
-		v.AddArg(v4)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEq16_20(v *Value) bool {
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (ZeroExt16to32 x) (Const32 [m])) (Const64 [s])))))
-	// cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c)
-	// result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) )
-	for {
-		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul16 {
-			break
-		}
-		_ = v_1.Args[1]
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst16 {
-			break
-		}
-		c := v_1_0.AuxInt
-		v_1_1 := v_1.Args[1]
-		if v_1_1.Op != OpTrunc32to16 {
-			break
-		}
-		v_1_1_0 := v_1_1.Args[0]
-		if v_1_1_0.Op != OpRsh32Ux64 {
-			break
-		}
-		_ =
v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (ZeroExt16to32 x) (Const32 [m])) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - 
break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (ZeroExt16to32 x) (Const32 [m])) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U 
(RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (ZeroExt16to32 x) (Const32 
[m])) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc32to16 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, 
OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])) (Const32 [m])) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc32to16 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt16to32 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := 
mul_1.Args[0] - if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq16_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq16 x (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])) (Const32 [m])) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt16to32 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == 
int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])) (Const32 [m])) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt16to32 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 
:= b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])) (Const32 [m])) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != 
OpZeroExt16to32 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc32to16 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAvg32u { - break - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_1_1_0_0_0.Args[1] - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] { - break - } - v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] - if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 16 { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 
(Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (ZeroExt16to32 x) (Const32 [m]))) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc32to16 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAvg32u { - break - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_1_1_0_0_0.Args[1] - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] { - break - } - v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] - if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 16 { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAvg32u { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_1_0_0_0_0.Args[1] - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if 
v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.AuxInt != 16 { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (ZeroExt16to32 x) (Const32 [m]))) (Const64 [s]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to16 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAvg32u { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_1_0_0_0_0.Args[1] - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.AuxInt != 16 { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = 
int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAvg32u { - break - } - _ = v_0_1_0_0.Args[1] - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_0_1_0_0_0.Args[1] - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_0_1_0_0_0_0.Args[0] { - break - } - v_0_1_0_0_0_1 := v_0_1_0_0_0.Args[1] - if v_0_1_0_0_0_1.Op != OpConst64 || v_0_1_0_0_0_1.AuxInt != 16 { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq16_40(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq16 (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (ZeroExt16to32 x) (Const32 [m]))) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to16 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAvg32u { - break - } - _ = v_0_1_0_0.Args[1] - v_0_1_0_0_0 := 
v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_0_1_0_0_0.Args[1] - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_0_1_0_0_0_0.Args[0] { - break - } - v_0_1_0_0_0_1 := v_0_1_0_0_0.Args[1] - if v_0_1_0_0_0_1.Op != OpConst64 || v_0_1_0_0_0_1.AuxInt != 16 { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAvg32u { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_0_0_0_0_0.Args[1] - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpZeroExt16to32 || x != v_0_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.AuxInt != 16 { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, 
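The long Mul32/Avg32u/Rsh32Ux64 trees these Eq16 rules match are what an unsigned 16-bit division by a constant looks like after the earlier strength-reduction pass has replaced `x / c` with a widened multiply by a magic constant plus shifts; the Eq rules recognize the resulting `x == c*(x/c)` shape. The snippet below is only a hand-rolled illustration of that multiply-and-shift identity for one divisor, with constants chosen by hand rather than taken from the compiler's umagic helper:

	package main

	import "fmt"

	func main() {
		// Divide every uint16 by 7 via one widened multiply and a shift,
		// mirroring the (Mul32 ... (ZeroExt16to32 x)) shape matched above.
		// m and s are illustrative, not the compiler's umagic(16, c) values.
		const c = 7
		const s = 3
		m := (uint64(1)<<(16+s) + c - 1) / c // ceil(2^(16+s) / c)
		for x := 0; x < 1<<16; x++ {
			got := uint16(uint64(x) * m >> (16 + s))
			if got != uint16(x)/c {
				panic("magic-number division disagrees with x/c")
			}
		}
		fmt.Println("x/7 == (x*m)>>(16+s) for every uint16 x")
	}
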
typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 (Mul16 (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (ZeroExt16to32 x) (Const32 [m]))) (Const64 [s]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAvg32u { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpLsh32x64 { - break - } - _ = v_0_0_0_0_0.Args[1] - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpZeroExt16to32 || x != v_0_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.AuxInt != 16 { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int64(int16(udivisible(16, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(16 - udivisible(16, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(udivisible(16, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub16 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - mul := 
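On the rewrite side, these rules replace the recomputed division with a direct divisibility test: multiply by the inverse of the odd part of c modulo 2^16, rotate right by the power-of-two part, and compare against (1<<16 - 1)/c, which is what the RotateLeft16/Leq16U result built from udivisible(16,c).m, .k and .max encodes. A standalone sketch of the same test, with the inverse computed by hand rather than by the compiler's udivisible helper:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// udivisible16 reports whether n is an exact multiple of d (d >= 2),
	// using the multiply-and-rotate test the generated rules emit.
	func udivisible16(n, d uint16) bool {
		k := bits.TrailingZeros16(d) // d = d0 << k with d0 odd
		d0 := d >> uint(k)

		// Multiplicative inverse of d0 modulo 2^16 by Newton iteration:
		// the seed is correct to 3 bits and each step doubles that.
		inv := d0
		for i := 0; i < 3; i++ {
			inv *= 2 - d0*inv
		}

		max := uint16(0xFFFF) / d
		// Rotating right by k matches the RotateLeft16 by 16-k in the
		// generated result expression.
		return bits.RotateLeft16(n*inv, -k) <= max
	}

	func main() {
		for d := uint16(2); d < 100; d++ {
			for n := 0; n < 1<<16; n++ {
				if udivisible16(uint16(n), d) != (uint16(n)%d == 0) {
					panic("divisibility test disagrees with n % d")
				}
			}
		}
		fmt.Println("multiply-and-rotate test matches n % d == 0")
	}
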
v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (SignExt16to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub16 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - 
v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 x (Mul16 (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub16 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpSignExt16to32 || x != v_1_0_1_0.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 x (Mul16 (Sub16 (Rsh32x64 mul:(Mul32 (SignExt16to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) (Const16 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = 
v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub16 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpSignExt16to32 || x != v_1_0_1_0.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub16 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpSignExt16to32 || x != v_0_1_1_0.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - 
v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (SignExt16to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub16 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpSignExt16to32 || x != v_0_1_1_0.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 (Mul16 (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 
[int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub16 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpSignExt16to32 || x != v_0_0_1_0.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - return false -} -func rewriteValuegeneric_OpEq16_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq16 (Mul16 (Sub16 (Rsh32x64 mul:(Mul32 (SignExt16to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) (Const16 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub16 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt16to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpSignExt16to32 || x != 
v_0_0_1_0.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) { - break - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int64(int16(sdivisible(16, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int64(int16(sdivisible(16, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int64(16 - sdivisible(16, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int64(int16(sdivisible(16, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 15 && kbar == 16 - k // result: (Eq16 (And16 n (Const16 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh16x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh16x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh16x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd16 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 15 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int64(1< 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Eq16 (And16 n (Const16 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh16x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh16x64 { - 
break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd16 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh16Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh16x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 15 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Eq16 (And16 n (Const16 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh16x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh16x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd16 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh16Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh16x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 15 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Eq16 (And16 n (Const16 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh16x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh16x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd16 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh16Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh16x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := 
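The (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 ...) ...) ...)) family in this hunk matches `n == (n / 2^k) * 2^k` spelled out as shifts, where the extra Rsh16Ux64 term is the usual bias that makes the signed right shift round toward zero; the rewrite keeps only the low k bits, `n & (2^k - 1) == 0`. A brute-force check of that equivalence for one k (standalone sketch, not compiler code):

	package main

	import "fmt"

	func main() {
		const k = 3
		const kbar = 16 - k
		for i := 0; i < 1<<16; i++ {
			n := int16(i)
			// bias is 2^k-1 for negative n and 0 otherwise, so the shifts
			// below compute the truncating division n/2^k times 2^k.
			bias := int16(uint16(n>>15) >> kbar)
			matched := n == (n+bias)>>k<<k // shape of the matched pattern
			rewritten := n&(1<<k-1) == 0   // shape of the rewritten form
			if matched != rewritten {
				panic("rewrite is not an equivalence")
			}
		}
		fmt.Println("n == (n/2^k)*2^k  <=>  n & (2^k-1) == 0 for int16 n")
	}
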
v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 15 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< x (Const16 [y])) (Const16 [y])) // cond: isPowerOfTwo(y) // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd16 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst16 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq16 (And16 (Const16 [y]) x) (Const16 [y])) - // cond: isPowerOfTwo(y) - // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq16 (Const16 [y]) (And16 x (Const16 [y]))) - // cond: isPowerOfTwo(y) - // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - 
v.AddArg(v2) - return true - } - return false -} -func rewriteValuegeneric_OpEq16_60(v *Value) bool { - b := v.Block - // match: (Eq16 (Const16 [y]) (And16 (Const16 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -11594,6575 +5408,913 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // result: (Eq32 (Const32 [int64(int32(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq32 (Const32 [c]) (Add32 x (Const32 [d]))) - // result: (Eq32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq32 (Add32 (Const32 [d]) x) (Const32 [c])) - // result: (Eq32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq32 (Add32 x (Const32 [d])) (Const32 [c])) - // result: (Eq32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - 
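The isPowerOfTwo rules just above rely on the fact that, for a single-bit mask y, x&y can only ever be 0 or y, so `x&y == y` is equivalent to `x&y != 0`, a form back-ends can usually test more cheaply. A quick exhaustive check of that identity (illustrative snippet, not part of the CL):

	package main

	import "fmt"

	func main() {
		for bit := uint(0); bit < 16; bit++ {
			y := uint16(1) << bit // a power-of-two mask, as isPowerOfTwo(y) requires
			for x := 0; x < 1<<16; x++ {
				if (uint16(x)&y == y) != (uint16(x)&y != 0) {
					panic("x&y == y and x&y != 0 disagree")
				}
			}
		}
		fmt.Println("for power-of-two y: x&y == y  <=>  x&y != 0")
	}
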
d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Eq32 (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } - // match: (Eq32 (Const32 [d]) (Const32 [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])))) + // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul32u { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i2] { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := v_1_1_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int64(int32(udivisible(32, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(32 - udivisible(32, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(udivisible(32, c).max)) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh32Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && 
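The `+` side of these hunks shows the new shape rulegen produces for commutative operations: instead of one pre-expanded copy of the matcher per argument order, a single body is wrapped in `for _i0 := 0; _i0 <= 1; _i0++` loops, reads the operands as `v.Args[_i0]` and `v.Args[1^_i0]`, and uses `continue` rather than `break` so a failed attempt still tries the swapped order. A stripped-down matcher in the same style (the `Value`/`Op*` names follow the ssa package; the helper itself is only an illustration, not generated code):

	// matchConstPlusX looks for (Add32 (Const32 [c]) x) in either
	// argument order, the way the generated commute loops do.
	func matchConstPlusX(v *Value) (c int64, x *Value, ok bool) {
		if v.Op != OpAdd32 {
			return 0, nil, false
		}
		_ = v.Args[1]
		for _i0 := 0; _i0 <= 1; _i0++ {
			v_0 := v.Args[_i0] // candidate constant operand
			if v_0.Op != OpConst32 {
				continue // wrong order; retry with the operands swapped
			}
			c = v_0.AuxInt
			x = v.Args[1^_i0] // the non-constant operand
			return c, x, true
		}
		return 0, nil, false
	}
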
mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true + break } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u x (Const32 [m])) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) + // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul32u { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpRsh32Ux64 { + continue + } + _ = mul_1.Args[1] + if x != mul_1.Args[0] { + continue + } + mul_1_1 := mul_1.Args[1] + if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := v_1_1_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int64(int32(udivisible(32, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(32 - udivisible(32, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(udivisible(32, c).max)) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh32Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - 
v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true + break } - // match: (Eq32 x (Mul32 (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) + // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAvg32u { + continue + } + _ = v_1_1_0.Args[1] + if x != v_1_1_0.Args[0] { + continue + } + mul := v_1_1_0.Args[1] + if mul.Op != OpHmul32u { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i2] { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := v_1_1_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int64(int32(udivisible(32, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(32 - udivisible(32, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(udivisible(32, c).max)) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32Ux64 { - break + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && 
udivisibleOK(32,c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc64to32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int64(int32(udivisible(32, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(32 - udivisible(32, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(udivisible(32, c).max)) + v.AddArg(v4) + return true + } + } } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul32u { - break + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc64to32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpRsh64Ux64 { + continue + } + _ = mul_1.Args[1] + mul_1_0 := mul_1.Args[0] + if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { + continue + } + mul_1_1 := mul_1.Args[1] + if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 
32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int64(int32(udivisible(32, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(32 - udivisible(32, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(udivisible(32, c).max)) + v.AddArg(v4) + return true + } + } } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc64to32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAvg64u { + continue + } + _ = v_1_1_0_0.Args[1] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpLsh64x64 { + continue + } + _ = v_1_1_0_0_0.Args[1] + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] { + continue + } + v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] + if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 32 { + continue + } + mul := v_1_1_0_0.Args[1] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int64(int32(udivisible(32, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(32 - udivisible(32, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(udivisible(32, c).max)) + v.AddArg(v4) + return true + } + } } - m := mul_0.AuxInt - if x != mul.Args[1] { - break + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses 
== 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) + // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) + for { + _ = v.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpSub32 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh64x64 { + continue + } + _ = v_1_1_1.Args[1] + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(int32(sdivisible(32, c).m)) + v2.AddArg(v3) + v2.AddArg(x) + v1.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(sdivisible(32, c).a)) + v1.AddArg(v4) + v0.AddArg(v1) + v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v5.AuxInt = int64(32 - sdivisible(32, c).k) + v0.AddArg(v5) + v.AddArg(v0) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int64(int32(sdivisible(32, c).max)) + v.AddArg(v6) + return true + } + } } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true + break } return false } func rewriteValuegeneric_OpEq32_10(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (Eq32 x (Mul32 (Rsh32Ux64 mul:(Hmul32u x (Const32 [m])) (Const64 [s])) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses 
== 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh32Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u x (Const32 [m])) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && 
x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh32Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Rsh32Ux64 mul:(Hmul32u x (Const32 [m])) (Const64 [s])) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 
[int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh32Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [m])) (Const64 [s])))) - // cond: v.Block.Func.pass.name 
!= "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh32Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 || mul_1.Type != typ.UInt32 { - break - } - m := mul_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - 
v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Rsh32Ux64 mul:(Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [m])) (Const64 [s])) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 || mul_1.Type != typ.UInt32 { - break - } - m := mul_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh32Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != 
"opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [m])) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh32Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 || mul_1.Type != typ.UInt32 { - break - } - m := mul_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if 
mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh32Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Rsh32Ux64 mul:(Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [m])) (Const64 [s])) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh32Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 || mul_1.Type != typ.UInt32 { - break - } - m := mul_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 
[int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh32Ux64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAvg32u { - break - } - _ = v_1_1_0.Args[1] - if x != v_1_1_0.Args[0] { - break - } - mul := v_1_1_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u x (Const32 [m]))) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh32Ux64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAvg32u { - break - } - _ = v_1_1_0.Args[1] - if x != v_1_1_0.Args[0] { - break - } - mul := v_1_1_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == 
int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAvg32u { - break - } - _ = v_1_0_0.Args[1] - if x != v_1_0_0.Args[0] { - break - } - mul := v_1_0_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Rsh32Ux64 (Avg32u x mul:(Hmul32u x (Const32 [m]))) (Const64 [s])) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAvg32u { - break - } - _ = v_1_0_0.Args[1] - if x != v_1_0_0.Args[0] { - break - } - mul := v_1_0_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - 
return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh32Ux64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAvg32u { - break - } - _ = v_0_1_0.Args[1] - if x != v_0_1_0.Args[0] { - break - } - mul := v_0_1_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u x (Const32 [m]))) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh32Ux64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAvg32u { - break - } - _ = v_0_1_0.Args[1] - if x != v_0_1_0.Args[0] { - break - } - mul := v_0_1_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - 
udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAvg32u { - break - } - _ = v_0_0_0.Args[1] - if x != v_0_0_0.Args[0] { - break - } - mul := v_0_0_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 (Mul32 (Rsh32Ux64 (Avg32u x mul:(Hmul32u x (Const32 [m]))) (Const64 [s])) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAvg32u { - break - } - _ = v_0_0_0.Args[1] - if x != v_0_0_0.Args[0] { - break - } - mul := v_0_0_0.Args[1] - if mul.Op != OpHmul32u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, 
typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to32 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (ZeroExt32to64 x) (Const64 [m])) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to32 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := 
v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to32 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (ZeroExt32to64 x) (Const64 [m])) (Const64 [s]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to32 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - 
break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to32 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (ZeroExt32to64 x) (Const64 [m])) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := 
v_0.Args[1] - if v_0_1.Op != OpTrunc64to32 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (ZeroExt32to64 x) (Const64 [m])) (Const64 [s]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 
[int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to32 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - 
v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_40(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) (Const64 [m])) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to32 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt32to64 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to32 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != 
OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) (Const64 [m])) (Const64 [s]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to32 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt32to64 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - 
break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to32 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) (Const64 [m])) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to32 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt32to64 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, 
typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) (Const64 [m])) (Const64 [s]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - mul_0_0 := mul_0.Args[0] - if mul_0_0.Op != OpZeroExt32to64 || x != mul_0_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op 
!= OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to32 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAvg64u { - break - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_1_1_0_0_0.Args[1] - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] { - break - } - v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] - if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 32 { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (ZeroExt32to64 x) (Const64 [m]))) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 
[int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc64to32 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAvg64u { - break - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_1_1_0_0_0.Args[1] - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] { - break - } - v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] - if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 32 { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to32 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAvg64u { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_1_0_0_0_0.Args[1] - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.AuxInt != 32 { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if 
v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 x (Mul32 (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (ZeroExt32to64 x) (Const64 [m]))) (Const64 [s]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc64to32 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAvg64u { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_1_0_0_0_0.Args[1] - v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] - if v_1_0_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_0_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.AuxInt != 32 { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s])))) x) - // cond: 
v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to32 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAvg64u { - break - } - _ = v_0_1_0_0.Args[1] - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_0_1_0_0_0.Args[1] - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_0_1_0_0_0_0.Args[0] { - break - } - v_0_1_0_0_0_1 := v_0_1_0_0_0.Args[1] - if v_0_1_0_0_0_1.Op != OpConst64 || v_0_1_0_0_0_1.AuxInt != 32 { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (ZeroExt32to64 x) (Const64 [m]))) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc64to32 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAvg64u { - break - } - _ = v_0_1_0_0.Args[1] - v_0_1_0_0_0 := v_0_1_0_0.Args[0] - if v_0_1_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_0_1_0_0_0.Args[1] - v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] - if v_0_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_0_1_0_0_0_0.Args[0] { - break - } - v_0_1_0_0_0_1 := v_0_1_0_0_0.Args[1] - if v_0_1_0_0_0_1.Op != OpConst64 || v_0_1_0_0_0_1.AuxInt != 32 { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != 
OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAvg64u { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_0_0_0_0_0.Args[1] - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpZeroExt32to64 || x != v_0_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.AuxInt != 32 { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 (Mul32 (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (ZeroExt32to64 x) (Const64 [m]))) (Const64 
[s]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc64to32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAvg64u { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpLsh64x64 { - break - } - _ = v_0_0_0_0_0.Args[1] - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpZeroExt32to64 || x != v_0_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.AuxInt != 32 { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int64(int32(udivisible(32, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(32 - udivisible(32, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(udivisible(32, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if 
v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (SignExt32to64 x) (Const64 [m])) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, 
c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpSignExt32to64 || x != v_1_0_1_0.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh64x64 mul:(Mul64 (SignExt32to64 x) (Const64 [m])) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := 
mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpSignExt32to64 || x != v_1_0_1_0.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpSignExt32to64 || x != v_0_1_1_0.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = 
int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_60(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (SignExt32to64 x) (Const64 [m])) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpSignExt32to64 || x != v_0_1_1_0.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := 
v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpSignExt32to64 || x != v_0_0_1_0.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh64x64 mul:(Mul64 (SignExt32to64 x) (Const64 [m])) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt32to64 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpSignExt32to64 || x != v_0_0_1_0.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) 
-		v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
-		v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
-		v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
-		v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
-		v3.AuxInt = int64(int32(sdivisible(32, c).m))
-		v2.AddArg(v3)
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
-		v4.AuxInt = int64(int32(sdivisible(32, c).a))
-		v1.AddArg(v4)
-		v0.AddArg(v1)
-		v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
-		v5.AuxInt = int64(32 - sdivisible(32, c).k)
-		v0.AddArg(v5)
-		v.AddArg(v0)
-		v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
-		v6.AuxInt = int64(int32(sdivisible(32, c).max))
-		v.AddArg(v6)
-		return true
-	}
-	// match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31])))))
+	// match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
 	// cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c)
 	// result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) )
 	for {
 		_ = v.Args[1]
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMul32 {
-			break
+		for _i0 := 0; _i0 <= 1; _i0++ {
+			x := v.Args[_i0]
+			v_1 := v.Args[1^_i0]
+			if v_1.Op != OpMul32 {
+				continue
+			}
+			_ = v_1.Args[1]
+			for _i1 := 0; _i1 <= 1; _i1++ {
+				v_1_0 := v_1.Args[_i1]
+				if v_1_0.Op != OpConst32 {
+					continue
+				}
+				c := v_1_0.AuxInt
+				v_1_1 := v_1.Args[1^_i1]
+				if v_1_1.Op != OpSub32 {
+					continue
+				}
+				_ = v_1_1.Args[1]
+				v_1_1_0 := v_1_1.Args[0]
+				if v_1_1_0.Op != OpRsh32x64 {
+					continue
+				}
+				_ = v_1_1_0.Args[1]
+				mul := v_1_1_0.Args[0]
+				if mul.Op != OpHmul32 {
+					continue
+				}
+				_ = mul.Args[1]
+				for _i2 := 0; _i2 <= 1; _i2++ {
+					mul_0 := mul.Args[_i2]
+					if mul_0.Op != OpConst32 {
+						continue
+					}
+					m := mul_0.AuxInt
+					if x != mul.Args[1^_i2] {
+						continue
+					}
+					v_1_1_0_1 := v_1_1_0.Args[1]
+					if v_1_1_0_1.Op != OpConst64 {
+						continue
+					}
+					s := v_1_1_0_1.AuxInt
+					v_1_1_1 := v_1_1.Args[1]
+					if v_1_1_1.Op != OpRsh32x64 {
+						continue
+					}
+					_ = v_1_1_1.Args[1]
+					if x != v_1_1_1.Args[0] {
+						continue
+					}
+					v_1_1_1_1 := v_1_1_1.Args[1]
+					if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) {
+						continue
+					}
+					v.reset(OpLeq32U)
+					v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+					v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+					v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+					v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+					v3.AuxInt = int64(int32(sdivisible(32, c).m))
+					v2.AddArg(v3)
+					v2.AddArg(x)
+					v1.AddArg(v2)
+					v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+					v4.AuxInt = int64(int32(sdivisible(32, c).a))
+					v1.AddArg(v4)
+					v0.AddArg(v1)
+					v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+					v5.AuxInt = int64(32 - sdivisible(32, c).k)
+					v0.AddArg(v5)
+					v.AddArg(v0)
+					v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+					v6.AuxInt = int64(int32(sdivisible(32, c).max))
+					v.AddArg(v6)
+					return true
+				}
+			}
 		}
-		_ = v_1.Args[1]
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		v_1_1 := v_1.Args[1]
-		if v_1_1.Op != OpSub32 {
-			break
-		}
-		_ = v_1_1.Args[1]
-		v_1_1_0 :=
v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true + break } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 x (Const32 [m])) (Const64 [s])) (Rsh32x64 x (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 
:= b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh32x64 mul:(Hmul32 x (Const32 [m])) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] 
- if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := 
b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 x (Const32 [m])) (Const64 [s])) (Rsh32x64 x (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := 
v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_70(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 (Mul32 (Sub32 (Rsh32x64 mul:(Hmul32 x (Const32 [m])) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt 
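All of the hand-expanded matchers being deleted in this region implement the same generic rule: (Eq32 x (Mul32 (Const32 [c]) <signed x/c>)), i.e. "x is a multiple of c", recognized from the magic-number division expansion and rewritten into the multiply/rotate/unsigned-compare sequence built from sdivisible(32,c). The snippet below is not the compiler's sdivisible computation; it is a minimal, hedged illustration of the underlying trick for an odd divisor (the RotateLeft32 in the generated result additionally handles even divisors): a signed 32-bit x is a multiple of an odd constant c > 1 exactly when uint32(x)*inv(c) + A <= 2*A in wrapping arithmetic, where inv(c) is the multiplicative inverse of c mod 2^32 and A = (2^31-1)/c.

package main

import "fmt"

// modInverse32 returns the multiplicative inverse of an odd c modulo 2^32.
// For odd c, c*c == 1 (mod 8), so inv starts correct to 3 bits and each
// Newton step doubles the number of correct low bits: 3 -> 6 -> 12 -> 24 -> 48.
func modInverse32(c uint32) uint32 {
    inv := c
    for i := 0; i < 4; i++ {
        inv *= 2 - c*inv
    }
    return inv
}

// divisibleByOdd reports whether x is a multiple of the odd constant c (c > 1),
// using only a multiply, an add and an unsigned compare. This is the same idea
// the sdivisible-based rules above encode, minus the rotate used for even divisors.
func divisibleByOdd(x int32, c uint32) bool {
    inv := modInverse32(c)
    a := uint32(1<<31-1) / c // number of positive multiples of c representable in int32
    return uint32(x)*inv+a <= 2*a
}

func main() {
    const c = 7 // sample odd divisor, chosen only for illustration
    for x := int32(-50); x <= 50; x++ {
        if divisibleByOdd(x, c) != (x%c == 0) {
            fmt.Println("mismatch at", x)
        }
    }
    fmt.Println("ok")
}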
= int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))))) + // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpSub32 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAdd32 { + continue + } + _ = v_1_1_0_0.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul := v_1_1_0_0.Args[_i2] + if mul.Op != OpHmul32 { + continue + } + _ = mul.Args[1] + for _i3 := 0; _i3 <= 1; _i3++ { + mul_0 := mul.Args[_i3] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i3] || x != v_1_1_0_0.Args[1^_i2] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh32x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int64(int32(sdivisible(32, c).m)) + v2.AddArg(v3) + v2.AddArg(x) + v1.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int64(int32(sdivisible(32, c).a)) + v1.AddArg(v4) + v0.AddArg(v1) + v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v5.AuxInt = int64(32 - sdivisible(32, c).k) + v0.AddArg(v5) + v.AddArg(v0) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int64(int32(sdivisible(32, c).max)) + v.AddArg(v6) + return true + } + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd32 { - break - } - _ = v_1_1_0_0.Args[1] - mul := v_1_1_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_1_1_0_0.Args[1] { - break - } - v_1_1_0_1 := 
v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true + break } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 x (Const32 [m])) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd32 { - break - } - _ = v_1_1_0_0.Args[1] - mul := v_1_1_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - if x != v_1_1_0_0.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, 
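The added hunk just above shows the new shape of the generated matchers: instead of one fully expanded body per operand ordering, a single body loops _i0, _i1, ... over the two argument orders of each commutative op (v.Args[_i] and v.Args[1^_i]) and uses continue on a failed check, so the next ordering is still tried; only after every ordering fails does the trailing break leave the rule. The following is a toy sketch of that shape on a stand-in Value type, not the real ssa.Value.

package main

import "fmt"

// Value is a stripped-down stand-in for ssa.Value, just enough to show the
// shape of the loop-based matchers that the generator now emits.
type Value struct {
    Op   string
    Aux  int64
    Args []*Value
}

// matchEqConst matches (Eq32 (Const32 [c]) x) with Eq32 treated as commutative,
// the way the generated code does: an index loop over the two argument orders,
// with continue (not break) on a failed check so the swapped order is still tried.
func matchEqConst(v *Value) (c int64, x *Value, ok bool) {
    if v.Op != "Eq32" {
        return 0, nil, false
    }
    for _i0 := 0; _i0 <= 1; _i0++ {
        v_0 := v.Args[_i0]   // candidate for the Const32 operand
        v_1 := v.Args[1^_i0] // the other operand
        if v_0.Op != "Const32" {
            continue // try the swapped order instead of giving up
        }
        return v_0.Aux, v_1, true
    }
    return 0, nil, false
}

func main() {
    x := &Value{Op: "Arg"}
    k := &Value{Op: "Const32", Aux: 42}
    // The constant is found in either position.
    for _, v := range []*Value{
        {Op: "Eq32", Args: []*Value{k, x}},
        {Op: "Eq32", Args: []*Value{x, k}},
    } {
        c, _, ok := matchEqConst(v)
        fmt.Println(ok, c)
    }
}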
OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 (Const32 [m]) x)) (Const64 [s])) (Rsh32x64 x (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd32 { - break - } - _ = v_1_1_0_0.Args[1] - if x != v_1_1_0_0.Args[0] { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 x (Const32 [m]))) (Const64 [s])) (Rsh32x64 x (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub32 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd32 { - break - 
} - _ = v_1_1_0_0.Args[1] - if x != v_1_1_0_0.Args[0] { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd32 { - break - } - _ = v_1_0_0_0.Args[1] - mul := v_1_0_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_1_0_0_0.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := 
b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 x (Const32 [m])) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd32 { - break - } - _ = v_1_0_0_0.Args[1] - mul := v_1_0_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - if x != v_1_0_0_0.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 (Const32 [m]) x)) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op 
!= OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd32 { - break - } - _ = v_1_0_0_0.Args[1] - if x != v_1_0_0_0.Args[0] { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 x (Mul32 (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 x (Const32 [m]))) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub32 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd32 { - break - } - _ = v_1_0_0_0.Args[1] - if x != v_1_0_0_0.Args[0] { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } 
- v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd32 { - break - } - _ = v_0_1_0_0.Args[1] - mul := v_0_1_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_0_1_0_0.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - return false -} -func rewriteValuegeneric_OpEq32_80(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 x (Const32 [m])) x) (Const64 [s])) (Rsh32x64 x (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && 
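The Hmul32/Add32/shift shape that these removed variants all match is the standard magic-number expansion of signed division by a constant: q = ((hmul32(x, m) + x) >> s) - (x >> 31), where Hmul32 is the high 32 bits of the signed 64-bit product. As a rough, self-contained illustration (the constants below are the commonly cited magic pair for dividing a signed 32-bit value by 7, hardcoded here for the example and not taken from the compiler's smagic tables):

package main

import "fmt"

// hmul32 returns the high 32 bits of the signed 64-bit product x*y,
// i.e. what the generic Hmul32 op computes.
func hmul32(x, y int32) int32 {
    return int32((int64(x) * int64(y)) >> 32)
}

// div7 divides by 7 using the Hmul32/Add32/shift shape matched above:
// q = ((hmul32(x, m) + x) >> s) - (x >> 31), with the widely used magic pair
// for 7 (m is the bit pattern 0x92492493 reinterpreted as int32, s = 2).
// Subtracting x>>31 adds 1 for negative x, giving round-toward-zero division
// like Go's / operator.
func div7(x int32) int32 {
    const m = int32(-1840700269)
    const s = 2
    return ((hmul32(x, m) + x) >> s) - (x >> 31)
}

func main() {
    for x := int32(-100); x <= 100; x++ {
        if div7(x) != x/7 {
            fmt.Println("mismatch at", x)
        }
    }
    fmt.Println("ok")
}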
sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd32 { - break - } - _ = v_0_1_0_0.Args[1] - mul := v_0_1_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - if x != v_0_1_0_0.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 (Const32 [m]) x)) (Const64 [s])) (Rsh32x64 x (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd32 { - break - } - _ = v_0_1_0_0.Args[1] - if x != v_0_1_0_0.Args[0] { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != 
OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 x (Const32 [m]))) (Const64 [s])) (Rsh32x64 x (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub32 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd32 { - break - } - _ = v_0_1_0_0.Args[1] - if x != v_0_1_0_0.Args[0] { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh32x64 (Add32 
mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd32 { - break - } - _ = v_0_0_0_0.Args[1] - mul := v_0_0_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_0_0_0_0.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 x (Const32 [m])) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd32 { - break - } - _ = v_0_0_0_0.Args[1] - mul := v_0_0_0_0.Args[0] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - if x != v_0_0_0_0.Args[1] { - break - } 
- v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 (Const32 [m]) x)) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd32 { - break - } - _ = v_0_0_0_0.Args[1] - if x != v_0_0_0_0.Args[0] { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - 
v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 (Mul32 (Sub32 (Rsh32x64 (Add32 x mul:(Hmul32 x (Const32 [m]))) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) (Const32 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub32 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd32 { - break - } - _ = v_0_0_0_0.Args[1] - if x != v_0_0_0_0.Args[0] { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpHmul32 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) { - break - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int64(int32(sdivisible(32, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int64(int32(sdivisible(32, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int64(32 - sdivisible(32, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int64(int32(sdivisible(32, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 31 && kbar == 32 - k // result: (Eq32 (And32 n (Const32 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh32x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh32x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd32 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + 
v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 31 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int64(1< 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Eq32 (And32 n (Const32 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh32x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd32 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh32Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh32x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 31 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Eq32 (And32 n (Const32 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh32x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd32 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh32Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh32x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - 
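The rewritten matcher in the hunk above recognizes the round-toward-zero division-by-2^k round trip, n == ((n + (uint32(n>>31) >> (32-k))) >> k) << k with kbar == 32-k, and replaces the whole comparison with the mask test n & (1<<uint(k)-1) == 0. A quick check of that identity, with k picked as a sample value only for the demonstration:

package main

import "fmt"

// roundTrip computes ((n + bias) >> k) << k, where bias is 1<<k - 1 for
// negative n and 0 otherwise: signed round-toward-zero division by 2^k
// followed by multiplying back, which is the shape the Eq32 rule matches.
func roundTrip(n int32, k uint) int32 {
    bias := int32(uint32(n>>31) >> (32 - k)) // 0 for n >= 0, 1<<k - 1 for n < 0
    return ((n + bias) >> k) << k
}

func main() {
    const k = 3 // sample shift; the rule's condition allows 0 < k < 31
    for n := int32(-40); n <= 40; n++ {
        lhs := n == roundTrip(n, k) // what the matched pattern tests
        rhs := n&(1<<k-1) == 0      // what the rule rewrites it to
        if lhs != rhs {
            fmt.Println("mismatch at", n)
        }
    }
    fmt.Println("ok")
}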
if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 31 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Eq32 (And32 n (Const32 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh32x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd32 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh32Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh32x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 31 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< x (Const32 [y])) (Const32 [y])) // cond: isPowerOfTwo(y) // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd32 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst32 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - 
v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq32 (And32 (Const32 [y]) x) (Const32 [y])) - // cond: isPowerOfTwo(y) - // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq32 (Const32 [y]) (And32 x (Const32 [y]))) - // cond: isPowerOfTwo(y) - // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq32 (Const32 [y]) (And32 (Const32 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -18171,37 +6323,22 @@ func rewriteValuegeneric_OpEq32F_0(v *Value) bool { // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d)) - return true - } - // match: (Eq32F (Const32F [d]) (Const32F [c])) - // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d)) - return true + break } return false } @@ -18223,4171 +6360,585 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // result: (Eq64 (Const64 [c-d]) x) for { _ = v.Args[1] - v_0 := 
v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq64 (Const64 [c]) (Add64 x (Const64 [d]))) - // result: (Eq64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq64 (Add64 (Const64 [d]) x) (Const64 [c])) - // result: (Eq64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq64 (Add64 x (Const64 [d])) (Const64 [c])) - // result: (Eq64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Eq64 (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } - // match: (Eq64 (Const64 [d]) (Const64 [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])))) + // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u 
(Const64 [m]) x) (Const64 [s])) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul64u { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i2] { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := v_1_1_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64(udivisible(64, c).m) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64(64 - udivisible(64, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64(udivisible(64, c).max) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh64Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true + break } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u x (Const64 [m])) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - 
break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh64Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq64_10(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq64 x (Mul64 (Rsh64Ux64 mul:(Hmul64u x (Const64 [m])) (Const64 [s])) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - 
v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh64Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u x (Const64 [m])) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh64Ux64 { - break - } - _ = v_0_1.Args[1] - mul := 
v_0_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Rsh64Ux64 mul:(Hmul64u x (Const64 [m])) (Const64 [s])) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - 
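// Aside: the (Leq64U (RotateLeft64 (Mul64 (Const64 [udivisible(64,c).m]) x)
// (Const64 [64-k])) (Const64 [max])) results above appear to encode the usual
// multiply-and-rotate divisibility test: multiply x by the inverse of the odd
// part of c modulo 2^64, rotate the k known-zero low bits away, and compare
// against (2^64-1)/c. A self-contained sketch of that test, with the constants
// computed at run time rather than baked in, checked against plain %:
package main

import (
	"fmt"
	"math/bits"
)

// divisibleBy reports whether x%c == 0 for a constant c > 0.
func divisibleBy(x, c uint64) bool {
	k := bits.TrailingZeros64(c)
	d := c >> uint(k) // odd part of c
	inv := d          // Newton's iteration: each step doubles the correct low bits
	for i := 0; i < 5; i++ {
		inv *= 2 - d*inv // now inv*d == 1 (mod 2^64)
	}
	max := ^uint64(0) / c
	return bits.RotateLeft64(x*inv, -k) <= max // rotate right by k
}

func main() {
	const c = 24
	for x := uint64(0); x < 100000; x++ {
		if divisibleBy(x, c) != (x%c == 0) {
			fmt.Println("mismatch at", x)
			return
		}
	}
	fmt.Println("rotate test agrees with % for c =", c)
}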
s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])))) + // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul64u { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpRsh64Ux64 { + continue + } + _ = mul_1.Args[1] + if x != mul_1.Args[0] { + continue + } + mul_1_1 := mul_1.Args[1] + if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := v_1_1_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64(udivisible(64, c).m) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64(64 - udivisible(64, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64(udivisible(64, c).max) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh64Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := 
mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true + break } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [m])) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh64Ux64 { - break - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := 
mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Rsh64Ux64 mul:(Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [m])) (Const64 [s])) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0.Args[1] - mul := v_1_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { 
- x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh64Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq64_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq64 (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [m])) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh64Ux64 { - break - } - _ = v_0_1.Args[1] - mul := v_0_1.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 
[s])) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpRsh64Ux64 { - break - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - break - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Rsh64Ux64 mul:(Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [m])) (Const64 [s])) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0.Args[1] - mul := v_0_0.Args[0] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpRsh64Ux64 { - break - } - _ = mul_0.Args[1] - if x != mul_0.Args[0] { - break - } - mul_0_1 := mul_0.Args[1] - if mul_0_1.Op != OpConst64 || mul_0_1.AuxInt != 1 { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - 
v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])))) + // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAvg64u { + continue + } + _ = v_1_1_0.Args[1] + if x != v_1_1_0.Args[0] { + continue + } + mul := v_1_1_0.Args[1] + if mul.Op != OpHmul64u { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i2] { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := v_1_1_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64(udivisible(64, c).m) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64(64 - udivisible(64, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64(udivisible(64, c).max) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh64Ux64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAvg64u { - break - } - _ = v_1_1_0.Args[1] - if x != v_1_1_0.Args[0] { - break - } - mul := v_1_1_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = 
int64(udivisible(64, c).max) - v.AddArg(v4) - return true + break } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u x (Const64 [m]))) (Const64 [s])))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpRsh64Ux64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAvg64u { - break - } - _ = v_1_1_0.Args[1] - if x != v_1_1_0.Args[0] { - break - } - mul := v_1_1_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - break - } - s := v_1_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAvg64u { - break - } - _ = v_1_0_0.Args[1] - if x != v_1_0_0.Args[0] { - break - } - mul := v_1_0_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - 
udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Rsh64Ux64 (Avg64u x mul:(Hmul64u x (Const64 [m]))) (Const64 [s])) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64Ux64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAvg64u { - break - } - _ = v_1_0_0.Args[1] - if x != v_1_0_0.Args[0] { - break - } - mul := v_1_0_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 { - break - } - s := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh64Ux64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAvg64u { - break - } - _ = v_0_1_0.Args[1] - if x != v_0_1_0.Args[0] { - break - } - mul := v_0_1_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - 
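// Aside: Avg64u in the trees above denotes the unsigned average (x+y)/2 kept
// exact to all 64 bits; as I read the matched shape, the divide-by-constant
// expansion reaches for it (together with Hmul64u) when the magic multiplier
// does not fit in 64 bits. One standard overflow-proof way to compute it,
// checked against a 128-bit sum (a sketch, not the compiler's lowering):
package main

import (
	"fmt"
	"math/bits"
)

// avg64u computes (x+y)/2 without overflow, using x+y = 2*(x&y) + (x^y).
func avg64u(x, y uint64) uint64 {
	return (x & y) + (x^y)>>1
}

func main() {
	cases := [][2]uint64{{0, 0}, {1, 2}, {^uint64(0), 1}, {^uint64(0), ^uint64(0)}}
	for _, p := range cases {
		sum, carry := bits.Add64(p[0], p[1], 0) // 128-bit sum of the pair
		want := carry<<63 | sum>>1              // (x+y)/2 with the carry folded back in
		fmt.Println(avg64u(p[0], p[1]) == want) // true
	}
}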
v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u x (Const64 [m]))) (Const64 [s]))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpRsh64Ux64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAvg64u { - break - } - _ = v_0_1_0.Args[1] - if x != v_0_1_0.Args[0] { - break - } - mul := v_0_1_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpConst64 { - break - } - s := v_0_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 (Mul64 (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAvg64u { - break - } - _ = v_0_0_0.Args[1] - if x != v_0_0_0.Args[0] { - break - } - mul := v_0_0_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, 
OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - return false -} -func rewriteValuegeneric_OpEq64_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq64 (Mul64 (Rsh64Ux64 (Avg64u x mul:(Hmul64u x (Const64 [m]))) (Const64 [s])) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64Ux64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAvg64u { - break - } - _ = v_0_0_0.Args[1] - if x != v_0_0_0.Args[0] { - break - } - mul := v_0_0_0.Args[1] - if mul.Op != OpHmul64u { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64(udivisible(64, c).m) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(64 - udivisible(64, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(udivisible(64, c).max) - v.AddArg(v4) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))))) + // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpSub64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpHmul64 { + continue + } + _ = mul.Args[1] + 
for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i2] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh64x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64(sdivisible(64, c).m) + v2.AddArg(v3) + v2.AddArg(x) + v1.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64(sdivisible(64, c).a) + v1.AddArg(v4) + v0.AddArg(v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64(64 - sdivisible(64, c).k) + v0.AddArg(v5) + v.AddArg(v0) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64(sdivisible(64, c).max) + v.AddArg(v6) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true + break } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 x (Const64 [m])) (Const64 [s])) (Rsh64x64 x (Const64 [63]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] 
- x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt 
= int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Sub64 (Rsh64x64 mul:(Hmul64 x (Const64 [m])) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != 
OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 x (Const64 [m])) (Const64 [s])) (Rsh64x64 x (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := 
b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Sub64 (Rsh64x64 mul:(Hmul64 x (Const64 [m])) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] 
- if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))))) + // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpSub64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAdd64 { + continue + } + _ = v_1_1_0_0.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul := v_1_1_0_0.Args[_i2] + if mul.Op != OpHmul64 { + continue + } + _ = mul.Args[1] + for _i3 := 0; _i3 <= 1; _i3++ { + mul_0 := mul.Args[_i3] + if mul_0.Op != OpConst64 { + continue + } + m := mul_0.AuxInt + if x != mul.Args[1^_i3] || x != v_1_1_0_0.Args[1^_i2] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh64x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64(sdivisible(64, c).m) + v2.AddArg(v3) + v2.AddArg(x) + v1.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + 
v4.AuxInt = int64(sdivisible(64, c).a) + v1.AddArg(v4) + v0.AddArg(v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64(64 - sdivisible(64, c).k) + v0.AddArg(v5) + v.AddArg(v0) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64(sdivisible(64, c).max) + v.AddArg(v6) + return true + } + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd64 { - break - } - _ = v_1_1_0_0.Args[1] - mul := v_1_1_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_1_1_0_0.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true + break } - return false -} -func rewriteValuegeneric_OpEq64_40(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 x (Const64 [m])) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd64 { - break - } - _ = v_1_1_0_0.Args[1] - mul := v_1_1_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - if x != v_1_1_0_0.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := 
v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 (Const64 [m]) x)) (Const64 [s])) (Rsh64x64 x (Const64 [63]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd64 { - break - } - _ = v_1_1_0_0.Args[1] - if x != v_1_1_0_0.Args[0] { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 
(Add64 x mul:(Hmul64 x (Const64 [m]))) (Const64 [s])) (Rsh64x64 x (Const64 [63]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub64 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd64 { - break - } - _ = v_1_1_0_0.Args[1] - if x != v_1_1_0_0.Args[0] { - break - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - break - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd64 { - break - } - _ = v_1_0_0_0.Args[1] - mul := v_1_0_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_1_0_0_0.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } 
- s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 x (Const64 [m])) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd64 { - break - } - _ = v_1_0_0_0.Args[1] - mul := v_1_0_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - if x != v_1_0_0_0.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt 
= int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 (Const64 [m]) x)) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd64 { - break - } - _ = v_1_0_0_0.Args[1] - if x != v_1_0_0_0.Args[0] { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 x (Mul64 (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 x (Const64 [m]))) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh64x64 { - break - } - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpAdd64 { - break - } - _ = v_1_0_0_0.Args[1] - if x != v_1_0_0_0.Args[0] { - break - } - mul := v_1_0_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - 
mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh64x64 { - break - } - _ = v_1_0_1.Args[1] - if x != v_1_0_1.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 63 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd64 { - break - } - _ = v_0_1_0_0.Args[1] - mul := v_0_1_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_0_1_0_0.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - 
sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 x (Const64 [m])) x) (Const64 [s])) (Rsh64x64 x (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd64 { - break - } - _ = v_0_1_0_0.Args[1] - mul := v_0_1_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - if x != v_0_1_0_0.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 (Const64 [m]) x)) (Const64 [s])) (Rsh64x64 x (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd64 { - break - } - _ = v_0_1_0_0.Args[1] - if x != 
v_0_1_0_0.Args[0] { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - return false -} -func rewriteValuegeneric_OpEq64_50(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq64 (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 x (Const64 [m]))) (Const64 [s])) (Rsh64x64 x (Const64 [63])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub64 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh64x64 { - break - } - _ = v_0_1_0.Args[1] - v_0_1_0_0 := v_0_1_0.Args[0] - if v_0_1_0_0.Op != OpAdd64 { - break - } - _ = v_0_1_0_0.Args[1] - if x != v_0_1_0_0.Args[0] { - break - } - mul := v_0_1_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh64x64 { - break - } - _ = v_0_1_1.Args[1] - if x != v_0_1_1.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := 
b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd64 { - break - } - _ = v_0_0_0_0.Args[1] - mul := v_0_0_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] || x != v_0_0_0_0.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 x (Const64 [m])) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - 
v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd64 { - break - } - _ = v_0_0_0_0.Args[1] - mul := v_0_0_0_0.Args[0] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - if x != v_0_0_0_0.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 (Const64 [m]) x)) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd64 { - break - } - _ = v_0_0_0_0.Args[1] - if x != v_0_0_0_0.Args[0] { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst64 { - break - } - m := mul_0.AuxInt - if x != mul.Args[1] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, 
typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 (Mul64 (Sub64 (Rsh64x64 (Add64 x mul:(Hmul64 x (Const64 [m]))) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) (Const64 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAdd64 { - break - } - _ = v_0_0_0_0.Args[1] - if x != v_0_0_0_0.Args[0] { - break - } - mul := v_0_0_0_0.Args[1] - if mul.Op != OpHmul64 { - break - } - _ = mul.Args[1] - if x != mul.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst64 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh64x64 { - break - } - _ = v_0_0_1.Args[1] - if x != v_0_0_1.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 63 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) { - break - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64(sdivisible(64, c).m) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64(sdivisible(64, c).a) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64(64 - sdivisible(64, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64(sdivisible(64, c).max) - v.AddArg(v6) - return true - } - // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 63 && kbar == 64 - k // result: (Eq64 (And64 n (Const64 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh64x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh64x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh64x64 { + continue + } + _ = 
v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd64 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 63 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { + continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64(1< 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Eq64 (And64 n (Const64 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh64x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd64 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh64Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh64x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 63 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Eq64 (And64 n (Const64 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh64x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd64 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != 
OpRsh64Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh64x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 63 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Eq64 (And64 n (Const64 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh64x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd64 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh64Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh64x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 63 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< x (Const64 [y])) (Const64 [y])) // cond: isPowerOfTwo(y) // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd64 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst64 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 
|| v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq64 (And64 (Const64 [y]) x) (Const64 [y])) - // cond: isPowerOfTwo(y) - // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq64 (Const64 [y]) (And64 x (Const64 [y]))) - // cond: isPowerOfTwo(y) - // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq64 (Const64 [y]) (And64 (Const64 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -22396,37 +6947,22 @@ func rewriteValuegeneric_OpEq64F_0(v *Value) bool { // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(auxTo64F(c) == auxTo64F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo64F(c) == auxTo64F(d)) - return true - } - // match: (Eq64F (Const64F [d]) (Const64F [c])) - // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = 
b2i(auxTo64F(c) == auxTo64F(d)) - return true + break } return false } @@ -22449,1904 +6985,421 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // result: (Eq8 (Const8 [int64(int8(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq8 (Const8 [c]) (Add8 x (Const8 [d]))) - // result: (Eq8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq8 (Add8 (Const8 [d]) x) (Const8 [c])) - // result: (Eq8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq8 (Add8 x (Const8 [d])) (Const8 [c])) - // result: (Eq8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Eq8 (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (Eq8 (Const8 [d]) (Const8 [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != 
OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } // match: (Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) // cond: x.Op != OpConst8 && udivisibleOK(8,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32u (ZeroExt8to32 x) (Const32 [c&0xff])) (Const32 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMod8u { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMod8u { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst8 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && udivisibleOK(8, c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = c & 0xff + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = 0 + v.AddArg(v3) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && udivisibleOK(8, c) && !hasSmallRotate(config)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = c & 0xff - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = 0 - v.AddArg(v3) - return true - } - // match: (Eq8 (Const8 [0]) (Mod8u x (Const8 [c]))) - // cond: x.Op != OpConst8 && udivisibleOK(8,c) && !hasSmallRotate(config) - // result: (Eq32 (Mod32u (ZeroExt8to32 x) (Const32 [c&0xff])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpMod8u { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c := v_1_1.AuxInt - if !(x.Op != OpConst8 && udivisibleOK(8, c) && !hasSmallRotate(config)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = c & 0xff - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = 0 - v.AddArg(v3) - return true + break } // match: (Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) // cond: x.Op != OpConst8 && sdivisibleOK(8,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32 (SignExt8to32 x) (Const32 [c])) (Const32 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMod8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpMod8 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst8 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && sdivisibleOK(8, c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v2.AuxInt = c + v0.AddArg(v2) + 
v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v3.AuxInt = 0 + v.AddArg(v3) + return true } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && sdivisibleOK(8, c) && !hasSmallRotate(config)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) - v2.AuxInt = c - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) - v3.AuxInt = 0 - v.AddArg(v3) - return true + break } - return false -} -func rewriteValuegeneric_OpEq8_10(v *Value) bool { - b := v.Block - config := b.Func.Config - typ := &b.Func.Config.Types - // match: (Eq8 (Const8 [0]) (Mod8 x (Const8 [c]))) - // cond: x.Op != OpConst8 && sdivisibleOK(8,c) && !hasSmallRotate(config) - // result: (Eq32 (Mod32 (SignExt8to32 x) (Const32 [c])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpMod8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c := v_1_1.AuxInt - if !(x.Op != OpConst8 && sdivisibleOK(8, c) && !hasSmallRotate(config)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) - v2.AuxInt = c - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) - v3.AuxInt = 0 - v.AddArg(v3) - return true - } - // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))))) + // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpTrunc32to8 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpMul8, 
typ.UInt8) + v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v2.AuxInt = int64(int8(udivisible(8, c).m)) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v3.AuxInt = int64(8 - udivisible(8, c).k) + v0.AddArg(v3) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v4.AuxInt = int64(int8(udivisible(8, c).max)) + v.AddArg(v4) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc32to8 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true + break } - // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (ZeroExt8to32 x) (Const32 [m])) (Const64 [s]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpTrunc32to8 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - 
return true - } - // match: (Eq8 x (Mul8 (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) (Const8 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to8 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq8 x (Mul8 (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (ZeroExt8to32 x) (Const32 [m])) (Const64 [s]))) (Const8 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpTrunc32to8 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - 
v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq8 (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to8 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq8 (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (ZeroExt8to32 x) (Const32 [m])) (Const64 [s])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpTrunc32to8 { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) 
- v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq8 (Mul8 (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) (Const8 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to8 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq8 (Mul8 (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (ZeroExt8to32 x) (Const32 [m])) (Const64 [s]))) (Const8 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpTrunc32to8 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32Ux64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpZeroExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := 
b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int64(int8(udivisible(8, c).m)) - v1.AddArg(v2) - v1.AddArg(x) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(8 - udivisible(8, c).k) - v0.AddArg(v3) - v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(udivisible(8, c).max)) - v.AddArg(v4) - return true - } - // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))))) + // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 { + continue + } + c := v_1_0.AuxInt + v_1_1 := v_1.Args[1^_i1] + if v_1_1.Op != OpSub8 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2++ { + mul_0 := mul.Args[_i2] + if mul_0.Op != OpConst32 { + continue + } + m := mul_0.AuxInt + mul_1 := mul.Args[1^_i2] + if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := v_1_1_0_1.AuxInt + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh32x64 { + continue + } + _ = v_1_1_1.Args[1] + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) + v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) + v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v3.AuxInt = int64(int8(sdivisible(8, c).m)) + v2.AddArg(v3) + v2.AddArg(x) + v1.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v4.AuxInt = int64(int8(sdivisible(8, c).a)) + v1.AddArg(v4) + v0.AddArg(v1) + v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v5.AuxInt = int64(8 - sdivisible(8, c).k) + v0.AddArg(v5) + v.AddArg(v0) + v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v6.AuxInt = int64(int8(sdivisible(8, c).max)) + v.AddArg(v6) + return true + } + } } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub8 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != 
OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { - break - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true + break } - return false -} -func rewriteValuegeneric_OpEq8_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (SignExt8to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - c := v_1_0.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSub8 { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - break - } - s := v_1_1_0_1.AuxInt - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - break - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] { - break - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, 
typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 x (Mul8 (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) (Const8 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub8 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ = v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { - break - } - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpSignExt8to32 || x != v_1_0_1_0.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 x (Mul8 (Sub8 (Rsh32x64 mul:(Mul32 (SignExt8to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) (Const8 [c]))) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSub8 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpRsh32x64 { - break - } - _ 
= v_1_0_0.Args[1] - mul := v_1_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_1_0_0_1 := v_1_0_0.Args[1] - if v_1_0_0_1.Op != OpConst64 { - break - } - s := v_1_0_0_1.AuxInt - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpRsh32x64 { - break - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpSignExt8to32 || x != v_1_0_1_0.Args[0] { - break - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpConst64 || v_1_0_1_1.AuxInt != 31 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c := v_1_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub8 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { - break - } - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpSignExt8to32 || x != v_0_1_1_0.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = 
int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (SignExt8to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31])))) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSub8 { - break - } - _ = v_0_1.Args[1] - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpRsh32x64 { - break - } - _ = v_0_1_0.Args[1] - mul := v_0_1_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_1_0_1 := v_0_1_0.Args[1] - if v_0_1_0_1.Op != OpConst64 { - break - } - s := v_0_1_0_1.AuxInt - v_0_1_1 := v_0_1.Args[1] - if v_0_1_1.Op != OpRsh32x64 { - break - } - _ = v_0_1_1.Args[1] - v_0_1_1_0 := v_0_1_1.Args[0] - if v_0_1_1_0.Op != OpSignExt8to32 || x != v_0_1_1_0.Args[0] { - break - } - v_0_1_1_1 := v_0_1_1.Args[1] - if v_0_1_1_1.Op != OpConst64 || v_0_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 (Mul8 (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) (Const8 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub8 { - break - } - _ = v_0_0.Args[1] - 
v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpConst32 { - break - } - m := mul_0.AuxInt - mul_1 := mul.Args[1] - if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpSignExt8to32 || x != v_0_0_1_0.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 (Mul8 (Sub8 (Rsh32x64 mul:(Mul32 (SignExt8to32 x) (Const32 [m])) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) (Const8 [c])) x) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSub8 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0_0.Args[1] - mul := v_0_0_0.Args[0] - if mul.Op != OpMul32 { - break - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - if mul_0.Op != OpSignExt8to32 || x != mul_0.Args[0] { - break - } - mul_1 := mul.Args[1] - if mul_1.Op != OpConst32 { - break - } - m := mul_1.AuxInt - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpConst64 { - break - } - s := v_0_0_0_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpRsh32x64 { - break - } - _ = v_0_0_1.Args[1] - v_0_0_1_0 := v_0_0_1.Args[0] - if v_0_0_1_0.Op != OpSignExt8to32 || x != v_0_0_1_0.Args[0] { - break - } - v_0_0_1_1 := v_0_0_1.Args[1] - if v_0_0_1_1.Op != OpConst64 || v_0_0_1_1.AuxInt != 31 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) { - break - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, 
typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int64(int8(sdivisible(8, c).m)) - v2.AddArg(v3) - v2.AddArg(x) - v1.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int64(int8(sdivisible(8, c).a)) - v1.AddArg(v4) - v0.AddArg(v1) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int64(8 - sdivisible(8, c).k) - v0.AddArg(v5) - v.AddArg(v0) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int64(int8(sdivisible(8, c).max)) - v.AddArg(v6) - return true - } - // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 7 && kbar == 8 - k // result: (Eq8 (And8 n (Const8 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh8x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh8x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh8x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd8 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 7 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int64(1< 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Eq8 (And8 n (Const8 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh8x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh8x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd8 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh8Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh8x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 7 { - break - } - v_1_0_0_0_1 := 
v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Eq8 (And8 n (Const8 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh8x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh8x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd8 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh8Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh8x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 7 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Eq8 (And8 n (Const8 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh8x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh8x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd8 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh8Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh8x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 7 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - 
v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< x (Const8 [y])) (Const8 [y])) // cond: isPowerOfTwo(y) // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd8 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst8 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq8 (And8 (Const8 [y]) x) (Const8 [y])) - // cond: isPowerOfTwo(y) - // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq8 (Const8 [y]) (And8 x (Const8 [y]))) - // cond: isPowerOfTwo(y) - // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Eq8 (Const8 [y]) (And8 (Const8 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - 
v.AddArg(v2) - return true + break } return false } @@ -24355,89 +7408,55 @@ func rewriteValuegeneric_OpEqB_0(v *Value) bool { // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstBool { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConstBool { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (EqB (ConstBool [d]) (ConstBool [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } // match: (EqB (ConstBool [0]) x) // result: (Not x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool || v_0.AuxInt != 0 { - break - } - v.reset(OpNot) - v.AddArg(x) - return true - } - // match: (EqB x (ConstBool [0])) - // result: (Not x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstBool || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstBool || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpNot) + v.AddArg(x) + return true } - v.reset(OpNot) - v.AddArg(x) - return true + break } // match: (EqB (ConstBool [1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (EqB x (ConstBool [1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstBool || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstBool || v_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } return false } @@ -24475,755 +7494,433 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // result: (ConstBool [b2i(a == b)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAddr { + continue + } + a := v_0.Aux + v_1 := v.Args[1^_i0] + if v_1.Op != OpAddr { + continue + } + b := v_1.Aux + v.reset(OpConstBool) + v.AuxInt = b2i(a == b) + return true } - a := v_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - b := v_1.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a == b) - return true - } - // match: (EqPtr (Addr {b} _) (Addr {a} _)) - // result: (ConstBool [b2i(a == b)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break - } - b := v_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - a := v_1.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a == b) - return true + break } // match: (EqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) // result: (ConstBool [b2i(a == b && o == 0)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAddr { + continue + } + a := v_0.Aux + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue 
+ } + o := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + b := v_1_0.Aux + v.reset(OpConstBool) + v.AuxInt = b2i(a == b && o == 0) + return true } - a := v_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - b := v_1_0.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o == 0) - return true - } - // match: (EqPtr (OffPtr [o] (Addr {b} _)) (Addr {a} _)) - // result: (ConstBool [b2i(a == b && o == 0)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - b := v_0_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - a := v_1.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o == 0) - return true + break } // match: (EqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) // result: (ConstBool [b2i(a == b && o1 == o2)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + continue + } + a := v_0_0.Aux + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o2 := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + b := v_1_0.Aux + v.reset(OpConstBool) + v.AuxInt = b2i(a == b && o1 == o2) + return true } - o1 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - a := v_0_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o2 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - b := v_1_0.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o1 == o2) - return true - } - // match: (EqPtr (OffPtr [o2] (Addr {b} _)) (OffPtr [o1] (Addr {a} _))) - // result: (ConstBool [b2i(a == b && o1 == o2)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o2 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - b := v_0_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o1 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - a := v_1_0.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o1 == o2) - return true + break } // match: (EqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) // result: (ConstBool [b2i(a == b)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + a := v_0.Aux + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLocalAddr { + continue + } + b := v_1.Aux + _ = v_1.Args[1] + v.reset(OpConstBool) + v.AuxInt = b2i(a == b) + return true } - a := v_0.Aux - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - b := v_1.Aux - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a == b) - return true - } - // match: (EqPtr (LocalAddr {b} _ _) (LocalAddr {a} _ _)) - // result: (ConstBool [b2i(a == b)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break - } - b := v_0.Aux - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - a := v_1.Aux - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a == b) - return true + break } // match: (EqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a == b && o == 0)]) for { _ = v.Args[1] - v_0 := v.Args[0] 
- if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + a := v_0.Aux + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + b := v_1_0.Aux + _ = v_1_0.Args[1] + v.reset(OpConstBool) + v.AuxInt = b2i(a == b && o == 0) + return true } - a := v_0.Aux - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - b := v_1_0.Aux - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o == 0) - return true - } - return false -} -func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { - // match: (EqPtr (OffPtr [o] (LocalAddr {b} _ _)) (LocalAddr {a} _ _)) - // result: (ConstBool [b2i(a == b && o == 0)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - b := v_0_0.Aux - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - a := v_1.Aux - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o == 0) - return true + break } // match: (EqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a == b && o1 == o2)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + a := v_0_0.Aux + _ = v_0_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o2 := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + b := v_1_0.Aux + _ = v_1_0.Args[1] + v.reset(OpConstBool) + v.AuxInt = b2i(a == b && o1 == o2) + return true } - o1 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - a := v_0_0.Aux - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o2 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - b := v_1_0.Aux - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o1 == o2) - return true - } - // match: (EqPtr (OffPtr [o2] (LocalAddr {b} _ _)) (OffPtr [o1] (LocalAddr {a} _ _))) - // result: (ConstBool [b2i(a == b && o1 == o2)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o2 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - b := v_0_0.Aux - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o1 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - a := v_1_0.Aux - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a == b && o1 == o2) - return true + break } // match: (EqPtr (OffPtr [o1] p1) p2) // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 == 0)]) - for { - p2 := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o1 := v_0.AuxInt - p1 := v_0.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 == 0) - return true - } - // match: (EqPtr p2 (OffPtr [o1] p1)) - // cond: isSamePtr(p1, p2) - // result: (ConstBool [b2i(o1 == 0)]) for { _ = v.Args[1] - p2 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] 
+ if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + p1 := v_0.Args[0] + p2 := v.Args[1^_i0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = b2i(o1 == 0) + return true } - o1 := v_1.AuxInt - p1 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 == 0) - return true + break } // match: (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 == o2)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + p1 := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o2 := v_1.AuxInt + p2 := v_1.Args[0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = b2i(o1 == o2) + return true } - o1 := v_0.AuxInt - p1 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o2 := v_1.AuxInt - p2 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 == o2) - return true - } - // match: (EqPtr (OffPtr [o2] p2) (OffPtr [o1] p1)) - // cond: isSamePtr(p1, p2) - // result: (ConstBool [b2i(o1 == o2)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o2 := v_0.AuxInt - p2 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o1 := v_1.AuxInt - p1 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 == o2) - return true + break } // match: (EqPtr (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (EqPtr (Const32 [d]) (Const32 [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } + return false +} +func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types // match: (EqPtr (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c == d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - return false -} -func rewriteValuegeneric_OpEqPtr_20(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (EqPtr (Const64 [d]) (Const64 [c])) - // result: (ConstBool [b2i(c == d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 
{ - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true + break } // match: (EqPtr (LocalAddr _ _) (Addr _)) // result: (ConstBool [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 0 + return true } - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 0 - return true - } - // match: (EqPtr (Addr _) (LocalAddr _ _)) - // result: (ConstBool [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = 0 - return true + break } // match: (EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) // result: (ConstBool [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + _ = v_0_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 0 + return true } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 0 - return true - } - // match: (EqPtr (Addr _) (OffPtr (LocalAddr _ _))) - // result: (ConstBool [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = 0 - return true + break } // match: (EqPtr (LocalAddr _ _) (OffPtr (Addr _))) // result: (ConstBool [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 0 + return true } - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 0 - return true - } - // match: (EqPtr (OffPtr (Addr _)) (LocalAddr _ _)) - // result: (ConstBool [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = 0 - return true + break } // match: (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) // result: (ConstBool [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + _ = v_0_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 0 + return true } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - _ = v_0_0.Args[1] - v_1 
:= v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 0 - return true - } - // match: (EqPtr (OffPtr (Addr _)) (OffPtr (LocalAddr _ _))) - // result: (ConstBool [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = 0 - return true + break } // match: (EqPtr (AddPtr p1 o1) p2) // cond: isSamePtr(p1, p2) // result: (Not (IsNonNil o1)) - for { - p2 := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddPtr { - break - } - o1 := v_0.Args[1] - p1 := v_0.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(o1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpEqPtr_30(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (EqPtr p2 (AddPtr p1 o1)) - // cond: isSamePtr(p1, p2) - // result: (Not (IsNonNil o1)) for { _ = v.Args[1] - p2 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAddPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAddPtr { + continue + } + o1 := v_0.Args[1] + p1 := v_0.Args[0] + p2 := v.Args[1^_i0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(o1) + v.AddArg(v0) + return true } - o1 := v_1.Args[1] - p1 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(o1) - v.AddArg(v0) - return true + break } // match: (EqPtr (Const32 [0]) p) // result: (Not (IsNonNil p)) - for { - p := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 0 { - break - } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(p) - v.AddArg(v0) - return true - } - // match: (EqPtr p (Const32 [0])) - // result: (Not (IsNonNil p)) for { _ = v.Args[1] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 0 { + continue + } + p := v.Args[1^_i0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(p) - v.AddArg(v0) - return true + break } // match: (EqPtr (Const64 [0]) p) // result: (Not (IsNonNil p)) - for { - p := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break - } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(p) - v.AddArg(v0) - return true - } - // match: (EqPtr p (Const64 [0])) - // result: (Not (IsNonNil p)) for { _ = v.Args[1] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + p := v.Args[1^_i0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(p) - v.AddArg(v0) - return true + break } // match: (EqPtr (ConstNil) p) // result: (Not (IsNonNil p)) - for { - p := v.Args[1] - v_0 := 
v.Args[0] - if v_0.Op != OpConstNil { - break - } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(p) - v.AddArg(v0) - return true - } - // match: (EqPtr p (ConstNil)) - // result: (Not (IsNonNil p)) for { _ = v.Args[1] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstNil { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstNil { + continue + } + p := v.Args[1^_i0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true } - v.reset(OpNot) - v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) - v0.AddArg(p) - v.AddArg(v0) - return true + break } return false } @@ -25274,41 +7971,21 @@ func rewriteValuegeneric_OpGeq16_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpConst16 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(int16(c) >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(int16(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (Geq16 (And16 (Const16 [c]) _) (Const16 [0])) - // cond: int16(c) >= 0 - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(int16(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } return false } @@ -25362,41 +8039,21 @@ func rewriteValuegeneric_OpGeq32_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpConst32 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 || v_1.AuxInt != 0 || !(int32(c) >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 || !(int32(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (Geq32 (And32 (Const32 [c]) _) (Const32 [0])) - // cond: int32(c) >= 0 - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 || !(int32(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } return false } @@ -25471,41 +8128,21 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpConst64 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(int64(c) >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(int64(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (Geq64 (And64 (Const64 [c]) _) (Const64 [0])) - // cond: int64(c) >= 0 - // 
result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(int64(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (Geq64 (Rsh64Ux64 _ (Const64 [c])) (Const64 [0])) // cond: c > 0 @@ -25603,41 +8240,21 @@ func rewriteValuegeneric_OpGeq8_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_1 := v_0.Args[1^_i0] + if v_0_1.Op != OpConst8 { + continue + } + c := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(int8(c) >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(int8(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (Geq8 (And8 (Const8 [c]) _) (Const8 [0])) - // cond: int8(c) >= 0 - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(int8(c) >= 0) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } return false } @@ -26054,49 +8671,25 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst8 { + continue + } + c := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (And8 _ (Const8 [c])) (Const8 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) // cond: 0 <= c && c < d @@ -26112,53 +8705,25 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { break } _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0_0 := v_0_0.Args[_i0] + if v_0_0_0.Op != OpConst8 { + continue + } + c := v_0_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (ZeroExt8to16 (And8 _ (Const8 [c]))) (Const16 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 
:= v.Args[0] - if v_0.Op != OpZeroExt8to16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd8 { - break - } - _ = v_0_0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst8 { - break - } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) // cond: 0 <= c && c < d @@ -26174,56 +8739,25 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { break } _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0_0 := v_0_0.Args[_i0] + if v_0_0_0.Op != OpConst8 { + continue + } + c := v_0_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - return false -} -func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { - // match: (IsInBounds (ZeroExt8to32 (And8 _ (Const8 [c]))) (Const32 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd8 { - break - } - _ = v_0_0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst8 { - break - } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) // cond: 0 <= c && c < d @@ -26239,53 +8773,25 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0_0 := v_0_0.Args[_i0] + if v_0_0_0.Op != OpConst8 { + continue + } + c := v_0_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (ZeroExt8to64 (And8 _ (Const8 [c]))) (Const64 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd8 { - break - } - _ = v_0_0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst8 { - break - } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) // cond: 0 <= c && c < d @@ -26297,50 +8803,29 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst16 { + continue + } + c := v_0_0.AuxInt + v_1 := v.Args[1] + if 
v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (And16 _ (Const16 [c])) (Const16 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } + return false +} +func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) @@ -26355,53 +8840,25 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0_0 := v_0_0.Args[_i0] + if v_0_0_0.Op != OpConst16 { + continue + } + c := v_0_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (ZeroExt16to32 (And16 _ (Const16 [c]))) (Const32 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd16 { - break - } - _ = v_0_0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst16 { - break - } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) // cond: 0 <= c && c < d @@ -26417,53 +8874,25 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0_0 := v_0_0.Args[_i0] + if v_0_0_0.Op != OpConst16 { + continue + } + c := v_0_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (ZeroExt16to64 (And16 _ (Const16 [c]))) (Const64 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd16 { - break - } - _ = v_0_0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst16 { - break - } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - 
v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) // cond: 0 <= c && c < d @@ -26475,52 +8904,25 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst32 { + continue + } + c := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - return false -} -func rewriteValuegeneric_OpIsInBounds_20(v *Value) bool { - // match: (IsInBounds (And32 _ (Const32 [c])) (Const32 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) // cond: 0 <= c && c < d @@ -26536,53 +8938,25 @@ func rewriteValuegeneric_OpIsInBounds_20(v *Value) bool { break } _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0_0 := v_0_0.Args[_i0] + if v_0_0_0.Op != OpConst32 { + continue + } + c := v_0_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (ZeroExt32to64 (And32 _ (Const32 [c]))) (Const64 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt32to64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd32 { - break - } - _ = v_0_0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst32 { - break - } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) // cond: 0 <= c && c < d @@ -26594,49 +8968,25 @@ func rewriteValuegeneric_OpIsInBounds_20(v *Value) bool { break } _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst64 { + continue + } + c := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - c := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (And64 _ (Const64 [c])) (Const64 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool 
[1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (IsInBounds (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(0 <= c && c < d)]) @@ -26739,7 +9089,7 @@ func rewriteValuegeneric_OpIsInBounds_20(v *Value) bool { } return false } -func rewriteValuegeneric_OpIsInBounds_30(v *Value) bool { +func rewriteValuegeneric_OpIsInBounds_20(v *Value) bool { // match: (IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) // cond: 0 < c && c < 8 && 1< n (Const16 [c])) // cond: isPowerOfTwo(c) @@ -31756,44 +14024,25 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpLsh16x64) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh16x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } - // match: (Mul16 (Const16 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh16x64 n (Const64 [log2(c)])) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh16x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true + break } // match: (Mul16 n (Const16 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) @@ -31801,184 +14050,74 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + c := v_1.AuxInt + if !(t.IsSigned() && isPowerOfTwo(-c)) { + continue + } + v.reset(OpNeg16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = log2(-c) + v0.AddArg(v1) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg16) - v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true + break } - // match: (Mul16 (Const16 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg16 (Lsh16x64 n (Const64 [log2(-c)]))) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg16) - v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpMul16_10(v *Value) bool { - b := v.Block // match: (Mul16 (Const16 [0]) _) // result: (Const16 [0]) for { _ = 
v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst16) + v.AuxInt = 0 + return true } - v.reset(OpConst16) - v.AuxInt = 0 - return true - } - // match: (Mul16 _ (Const16 [0])) - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true + break } // match: (Mul16 (Const16 [c]) (Mul16 (Const16 [d]) x)) // result: (Mul16 (Const16 [int64(int16(c*d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c * d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul16 (Const16 [c]) (Mul16 x (Const16 [d]))) - // result: (Mul16 (Const16 [int64(int16(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul16 (Mul16 (Const16 [d]) x) (Const16 [c])) - // result: (Mul16 (Const16 [int64(int16(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul16 (Mul16 x (Const16 [d])) (Const16 [c])) - // result: (Mul16 (Const16 [int64(int16(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -31989,89 +14128,55 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // result: (Const32 [int64(int32(c*d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + 
continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(c * d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c * d)) - return true - } - // match: (Mul32 (Const32 [d]) (Const32 [c])) - // result: (Const32 [int64(int32(c*d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c * d)) - return true + break } // match: (Mul32 (Const32 [1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul32 x (Const32 [1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Mul32 (Const32 [-1]) x) // result: (Neg32 x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != -1 { - break - } - v.reset(OpNeg32) - v.AddArg(x) - return true - } - // match: (Mul32 x (Const32 [-1])) - // result: (Neg32 x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpNeg32) + v.AddArg(x) + return true } - v.reset(OpNeg32) - v.AddArg(x) - return true + break } // match: (Mul32 n (Const32 [c])) // cond: isPowerOfTwo(c) @@ -32079,44 +14184,25 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpLsh32x64) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh32x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } - // match: (Mul32 (Const32 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh32x64 n (Const64 [log2(c)])) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh32x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true + break } // match: (Mul32 n (Const32 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) @@ -32124,314 +14210,112 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + 
c := v_1.AuxInt + if !(t.IsSigned() && isPowerOfTwo(-c)) { + continue + } + v.reset(OpNeg32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = log2(-c) + v0.AddArg(v1) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg32) - v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true + break } - // match: (Mul32 (Const32 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg32 (Lsh32x64 n (Const64 [log2(-c)]))) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg32) - v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpMul32_10(v *Value) bool { - b := v.Block // match: (Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd32 || v_1.Type != t { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c * d)) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpMul32, t) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = c + v1.AddArg(v2) + v1.AddArg(x) + v.AddArg(v1) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul32 (Const32 [c]) (Add32 x (Const32 [d]))) - // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul32 (Add32 (Const32 [d]) x) (Const32 [c])) - // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 || v_0_0.Type != t { - 
break - } - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul32 (Add32 x (Const32 [d])) (Const32 [c])) - // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 || v_0_1.Type != t { - break - } - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true + break } // match: (Mul32 (Const32 [0]) _) // result: (Const32 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst32) + v.AuxInt = 0 + return true } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } - // match: (Mul32 _ (Const32 [0])) - // result: (Const32 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true + break } // match: (Mul32 (Const32 [c]) (Mul32 (Const32 [d]) x)) // result: (Mul32 (Const32 [int64(int32(c*d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpMul32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c * d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul32 (Const32 [c]) (Mul32 x (Const32 [d]))) - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul32 (Mul32 (Const32 [d]) x) (Const32 [c])) - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - x := v_0.Args[1] 
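// Why the Mul32 reassociation above may truncate the folded constant with
// int64(int32(c*d)): 32-bit multiplication wraps, and wrapping multiplication
// is still associative, so (c*d)*x and c*(d*x) agree for every int32 input.
// A tiny illustrative sanity check (not compiler code):
func mul32ReassocHolds(c, d, x int32) bool {
	return (c*d)*x == c*(d*x)
}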
- v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul32 (Mul32 x (Const32 [d])) (Const32 [c])) - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -32440,116 +14324,72 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F { + continue + } + d := v_1.AuxInt + v.reset(OpConst32F) + v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - d := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) - return true - } - // match: (Mul32F (Const32F [d]) (Const32F [c])) - // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - c := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) - return true + break } // match: (Mul32F x (Const32F [auxFrom64F(1)])) // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom64F(1) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom64F(1) { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul32F (Const32F [auxFrom64F(1)]) x) - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F || v_0.AuxInt != auxFrom64F(1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Mul32F x (Const32F [auxFrom32F(-1)])) // result: (Neg32F x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom32F(-1) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom32F(-1) { + continue + } + v.reset(OpNeg32F) + v.AddArg(x) + return true } - v.reset(OpNeg32F) - v.AddArg(x) - return true - } - // match: (Mul32F (Const32F [auxFrom32F(-1)]) x) - // result: (Neg32F x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F || v_0.AuxInt != auxFrom32F(-1) { - break - } - v.reset(OpNeg32F) - v.AddArg(x) - return true + break } // match: (Mul32F x (Const32F [auxFrom32F(2)])) // result: (Add32F x x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if 
v_1.Op != OpConst32F || v_1.AuxInt != auxFrom32F(2) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom32F(2) { + continue + } + v.reset(OpAdd32F) + v.AddArg(x) + v.AddArg(x) + return true } - v.reset(OpAdd32F) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (Mul32F (Const32F [auxFrom32F(2)]) x) - // result: (Add32F x x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F || v_0.AuxInt != auxFrom32F(2) { - break - } - v.reset(OpAdd32F) - v.AddArg(x) - v.AddArg(x) - return true + break } return false } @@ -32560,89 +14400,55 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // result: (Const64 [c*d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConst64) + v.AuxInt = c * d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c * d - return true - } - // match: (Mul64 (Const64 [d]) (Const64 [c])) - // result: (Const64 [c*d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c * d - return true + break } // match: (Mul64 (Const64 [1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul64 x (Const64 [1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Mul64 (Const64 [-1]) x) // result: (Neg64 x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != -1 { - break - } - v.reset(OpNeg64) - v.AddArg(x) - return true - } - // match: (Mul64 x (Const64 [-1])) - // result: (Neg64 x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpNeg64) + v.AddArg(x) + return true } - v.reset(OpNeg64) - v.AddArg(x) - return true + break } // match: (Mul64 n (Const64 [c])) // cond: isPowerOfTwo(c) @@ -32650,44 +14456,25 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpLsh64x64) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh64x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - 
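// The Mul64 power-of-two rule here strength-reduces a multiply into a shift:
// n*c == n<<log2(c) whenever c is a positive power of two, even with 64-bit
// wraparound. The helpers below are illustrative stand-ins for the compiler's
// isPowerOfTwo/log2, not the real ones.
func isPow2(c int64) bool { return c > 0 && c&(c-1) == 0 }

func log2of(c int64) uint {
	var k uint
	for c > 1 {
		c >>= 1
		k++
	}
	return k
}

func mulByPow2(n, c int64) int64 {
	if !isPow2(c) {
		return n * c
	}
	return n << log2of(c) // same value as n*c for power-of-two c
}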
v.AddArg(v0) - return true - } - // match: (Mul64 (Const64 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh64x64 n (Const64 [log2(c)])) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh64x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true + break } // match: (Mul64 n (Const64 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) @@ -32695,314 +14482,112 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + c := v_1.AuxInt + if !(t.IsSigned() && isPowerOfTwo(-c)) { + continue + } + v.reset(OpNeg64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = log2(-c) + v0.AddArg(v1) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg64) - v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true + break } - // match: (Mul64 (Const64 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg64 (Lsh64x64 n (Const64 [log2(-c)]))) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - c := v_0.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg64) - v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpMul64_10(v *Value) bool { - b := v.Block // match: (Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd64 || v_1.Type != t { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c * d + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpMul64, t) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = c + v1.AddArg(v2) + v1.AddArg(x) + v.AddArg(v1) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul64 (Const64 [c]) (Add64 x (Const64 [d]))) - // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if 
v_1.Op != OpAdd64 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul64 (Add64 (Const64 [d]) x) (Const64 [c])) - // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 || v_0_0.Type != t { - break - } - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul64 (Add64 x (Const64 [d])) (Const64 [c])) - // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != t { - break - } - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true + break } // match: (Mul64 (Const64 [0]) _) // result: (Const64 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst64) + v.AuxInt = 0 + return true } - v.reset(OpConst64) - v.AuxInt = 0 - return true - } - // match: (Mul64 _ (Const64 [0])) - // result: (Const64 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true + break } // match: (Mul64 (Const64 [c]) (Mul64 (Const64 [d]) x)) // result: (Mul64 (Const64 [c*d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c * d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul64 (Const64 [c]) (Mul64 x (Const64 [d]))) - // result: (Mul64 (Const64 [c*d]) x) - for { - 
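// Control-flow skeleton shared by the rewritten matchers in this file: the
// original one-shot "for { ... }" per rule is kept, a failed sub-match inside
// the commute loop continues to the swapped operand order, and only when both
// orders fail does control reach the trailing break and fall through to the
// next rule. An illustrative sketch of that shape only (match/rewrite are
// placeholders, not compiler APIs):
func ruleSkeleton(match func(i int) bool, rewrite func()) bool {
	for {
		for _i0 := 0; _i0 <= 1; _i0++ {
			if !match(_i0) {
				continue // try the other operand order
			}
			rewrite()
			return true
		}
		break // both orders failed; fall through to the next rule
	}
	return false
}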
_ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul64 (Mul64 (Const64 [d]) x) (Const64 [c])) - // result: (Mul64 (Const64 [c*d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul64 (Mul64 x (Const64 [d])) (Const64 [c])) - // result: (Mul64 (Const64 [c*d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -33011,116 +14596,72 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F { + continue + } + d := v_1.AuxInt + v.reset(OpConst64F) + v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - d := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) - return true - } - // match: (Mul64F (Const64F [d]) (Const64F [c])) - // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - c := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) - return true + break } // match: (Mul64F x (Const64F [auxFrom64F(1)])) // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(1) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(1) { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul64F (Const64F [auxFrom64F(1)]) x) - // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F || v_0.AuxInt != auxFrom64F(1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Mul64F x (Const64F [auxFrom64F(-1)])) // result: (Neg64F x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(-1) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := 
v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(-1) { + continue + } + v.reset(OpNeg64F) + v.AddArg(x) + return true } - v.reset(OpNeg64F) - v.AddArg(x) - return true - } - // match: (Mul64F (Const64F [auxFrom64F(-1)]) x) - // result: (Neg64F x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F || v_0.AuxInt != auxFrom64F(-1) { - break - } - v.reset(OpNeg64F) - v.AddArg(x) - return true + break } // match: (Mul64F x (Const64F [auxFrom64F(2)])) // result: (Add64F x x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(2) { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(2) { + continue + } + v.reset(OpAdd64F) + v.AddArg(x) + v.AddArg(x) + return true } - v.reset(OpAdd64F) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (Mul64F (Const64F [auxFrom64F(2)]) x) - // result: (Add64F x x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F || v_0.AuxInt != auxFrom64F(2) { - break - } - v.reset(OpAdd64F) - v.AddArg(x) - v.AddArg(x) - return true + break } return false } @@ -33131,89 +14672,55 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // result: (Const8 [int64(int8(c*d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + v.reset(OpConst8) + v.AuxInt = int64(int8(c * d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c * d)) - return true - } - // match: (Mul8 (Const8 [d]) (Const8 [c])) - // result: (Const8 [int64(int8(c*d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c * d)) - return true + break } // match: (Mul8 (Const8 [1]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul8 x (Const8 [1])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Mul8 (Const8 [-1]) x) // result: (Neg8 x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != -1 { - break - } - v.reset(OpNeg8) - v.AddArg(x) - return true - } - // match: (Mul8 x (Const8 [-1])) - // result: (Neg8 x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != -1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpNeg8) + v.AddArg(x) + return true } - v.reset(OpNeg8) - v.AddArg(x) - return true + break } // match: (Mul8 n (Const8 [c])) // cond: isPowerOfTwo(c) @@ -33221,44 +14728,25 @@ func rewriteValuegeneric_OpMul8_0(v *Value) 
bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + continue + } + v.reset(OpLsh8x64) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh8x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } - // match: (Mul8 (Const8 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh8x64 n (Const64 [log2(c)])) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - c := v_0.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh8x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true + break } // match: (Mul8 n (Const8 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) @@ -33266,184 +14754,74 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { for { t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + c := v_1.AuxInt + if !(t.IsSigned() && isPowerOfTwo(-c)) { + continue + } + v.reset(OpNeg8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = log2(-c) + v0.AddArg(v1) + v.AddArg(v0) + return true } - c := v_1.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg8) - v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true + break } - // match: (Mul8 (Const8 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg8 (Lsh8x64 n (Const64 [log2(-c)]))) - for { - t := v.Type - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - c := v_0.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { - break - } - v.reset(OpNeg8) - v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpMul8_10(v *Value) bool { - b := v.Block // match: (Mul8 (Const8 [0]) _) // result: (Const8 [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 0 { + continue + } + v.reset(OpConst8) + v.AuxInt = 0 + return true } - v.reset(OpConst8) - v.AuxInt = 0 - return true - } - // match: (Mul8 _ (Const8 [0])) - // result: (Const8 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true + break } // match: (Mul8 (Const8 [c]) (Mul8 (Const8 [d]) x)) // result: (Mul8 (Const8 [int64(int8(c*d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; 
_i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c * d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul8 (Const8 [c]) (Mul8 x (Const8 [d]))) - // result: (Mul8 (Const8 [int64(int8(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul8 (Mul8 (Const8 [d]) x) (Const8 [c])) - // result: (Mul8 (Const8 [int64(int8(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul8 (Mul8 x (Const8 [d])) (Const8 [c])) - // result: (Mul8 (Const8 [int64(int8(c*d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -33739,595 +15117,189 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { // result: (Neq16 (Const16 [int64(int16(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq16 (Const16 [c]) (Add16 x (Const16 [d]))) - // result: (Neq16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - 
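// The Neq16 rule in this region moves a constant across the Add16: with
// wrapping 16-bit arithmetic, c != d+x exactly when c-d != x, which is why the
// result compares x against int64(int16(c-d)). A small illustrative check:
func neq16AddConstHolds(c, d, x int16) bool {
	return (c != d+x) == (c-d != x)
}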
t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq16 (Add16 (Const16 [d]) x) (Const16 [c])) - // result: (Neq16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq16 (Add16 x (Const16 [d])) (Const16 [c])) - // result: (Neq16 (Const16 [int64(int16(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Neq16 (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } - // match: (Neq16 (Const16 [d]) (Const16 [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 15 && kbar == 16 - k // result: (Neq16 (And16 n (Const16 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh16x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh16x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh16x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd16 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + 
if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 15 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int64(1< 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Neq16 (And16 n (Const16 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh16x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh16x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd16 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh16Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh16x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 15 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Neq16 (And16 n (Const16 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh16x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh16x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd16 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh16Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh16x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 15 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 
|| v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Neq16 (And16 n (Const16 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh16x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh16x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd16 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh16Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh16x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 15 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 15 && kbar == 16-k) { - break - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int64(1< x (Const16 [y])) (Const16 [y])) // cond: isPowerOfTwo(y) // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd16 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst16 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq16 (And16 (Const16 [y]) x) (Const16 [y])) - // cond: isPowerOfTwo(y) - // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) - for { 
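// The rule above rewrites "(x&y) != y" into "(x&y) == 0" when y is a power of
// two: with a single-bit mask, x&y can only be 0 or y, so the two tests are
// equivalent. An illustrative check of that equivalence (not compiler code):
func neq16SingleBitTestHolds(x, y int16) bool {
	if y == 0 || y&(y-1) != 0 {
		return true // not a single-bit mask; the rule does not apply
	}
	return (x&y != y) == (x&y == 0)
}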
- _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq16 (Const16 [y]) (And16 x (Const16 [y]))) - // cond: isPowerOfTwo(y) - // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq16 (Const16 [y]) (And16 (Const16 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -34349,595 +15321,189 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { // result: (Neq32 (Const32 [int64(int32(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq32 (Const32 [c]) (Add32 x (Const32 [d]))) - // result: (Neq32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d 
:= v_1_1.AuxInt - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq32 (Add32 (Const32 [d]) x) (Const32 [c])) - // result: (Neq32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq32 (Add32 x (Const32 [d])) (Const32 [c])) - // result: (Neq32 (Const32 [int64(int32(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Neq32 (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } - // match: (Neq32 (Const32 [d]) (Const32 [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (Neq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Neq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 31 && kbar == 32 - k // result: (Neq32 (And32 n (Const32 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh32x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh32x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd32 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 31 { + continue + } + 
v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int64(1< 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Neq32 (And32 n (Const32 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh32x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd32 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh32Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh32x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 31 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Neq32 (And32 n (Const32 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh32x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd32 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh32Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh32x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 31 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - 
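// The large Neq32 pattern being rewritten here appears to recognize a signed
// "is n a multiple of 1<<k" test: the Rsh/Add/Rsh/Lsh chain rounds n toward
// zero to a multiple of 1<<k, and comparing that with n is the same as testing
// the low k bits, hence the mask-and-compare result. A standalone check of
// that equivalence under the rule's bounds (illustrative only; kbar
// corresponds to 32-k):
func neq32RoundTrickMatchesMask(n int32, k uint) bool {
	if k == 0 || k >= 31 {
		return true // outside the rule's cond
	}
	bias := int32(uint32(n>>31) >> (32 - k)) // 1<<k - 1 when n < 0, else 0
	rounded := (n + bias) >> k << k          // n rounded toward zero to a multiple of 1<<k
	return (n != rounded) == (n&(1<<k-1) != 0)
}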
v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Neq32 (And32 n (Const32 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh32x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh32x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd32 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh32Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh32x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 31 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 31 && kbar == 32-k) { - break - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int64(1< x (Const32 [y])) (Const32 [y])) // cond: isPowerOfTwo(y) // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd32 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst32 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq32 (And32 (Const32 [y]) x) (Const32 [y])) - // cond: isPowerOfTwo(y) - // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 || v_0_0.Type != t { - break - } - y := 
v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq32 (Const32 [y]) (And32 x (Const32 [y]))) - // cond: isPowerOfTwo(y) - // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq32 (Const32 [y]) (And32 (Const32 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -34946,37 +15512,22 @@ func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32F { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d)) - return true - } - // match: (Neq32F (Const32F [d]) (Const32F [c])) - // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d)) - return true + break } return false } @@ -34998,595 +15549,189 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { // result: (Neq64 (Const64 [c-d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpNeq64) + v0 := 
b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq64 (Const64 [c]) (Add64 x (Const64 [d]))) - // result: (Neq64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq64 (Add64 (Const64 [d]) x) (Const64 [c])) - // result: (Neq64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq64 (Add64 x (Const64 [d])) (Const64 [c])) - // result: (Neq64 (Const64 [c-d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Neq64 (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } - // match: (Neq64 (Const64 [d]) (Const64 [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (Neq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Neq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 63 && kbar == 64 - k // result: (Neq64 (And64 n (Const64 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh64x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh64x64 { + continue + } + _ = v_1.Args[1] 
+ v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd64 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 63 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { + continue + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64(1< 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Neq64 (And64 n (Const64 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh64x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd64 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh64Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh64x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 63 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Neq64 (And64 n (Const64 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh64x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd64 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != 
v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh64Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh64x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 63 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Neq64 (And64 n (Const64 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh64x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh64x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd64 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh64Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh64x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 63 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 63 && kbar == 64-k) { - break - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64(1< x (Const64 [y])) (Const64 [y])) // cond: isPowerOfTwo(y) // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd64 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst64 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type 
!= t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq64 (And64 (Const64 [y]) x) (Const64 [y])) - // cond: isPowerOfTwo(y) - // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq64 (Const64 [y]) (And64 x (Const64 [y]))) - // cond: isPowerOfTwo(y) - // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq64 (Const64 [y]) (And64 (Const64 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -35595,37 +15740,22 @@ func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64F { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64F { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d)) - return true - } - // match: (Neq64F (Const64F [d]) (Const64F [c])) - // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != 
OpConst64F { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d)) - return true + break } return false } @@ -35647,595 +15777,189 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { // result: (Neq8 (Const8 [int64(int8(c-d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c - d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq8 (Const8 [c]) (Add8 x (Const8 [d]))) - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq8 (Add8 (Const8 [d]) x) (Const8 [c])) - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq8 (Add8 x (Const8 [d])) (Const8 [c])) - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Neq8 (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } - // match: (Neq8 (Const8 [d]) (Const8 [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := 
v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (Neq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k]))) + // match: (Neq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 7 && kbar == 8 - k // result: (Neq8 (And8 n (Const8 [int64(1< [0])) for { _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh8x64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + n := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLsh8x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh8x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd8 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if n != v_1_0_0.Args[_i1] { + continue + } + v_1_0_0_1 := v_1_0_0.Args[1^_i1] + if v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || v_1_0_0_1_0_1.AuxInt != 7 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := v_1_0_0_1_1.AuxInt + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := v_1_0_1.AuxInt + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(n) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int64(1< 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k]))) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Neq8 (And8 n (Const8 [int64(1< [0])) - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLsh8x64 { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh8x64 { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd8 { - break - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - if v_1_0_0_0.Op != OpRsh8Ux64 || v_1_0_0_0.Type != t { - break - } - _ = v_1_0_0_0.Args[1] - v_1_0_0_0_0 := v_1_0_0_0.Args[0] - if v_1_0_0_0_0.Op != OpRsh8x64 || v_1_0_0_0_0.Type != t { - break - } - _ = v_1_0_0_0_0.Args[1] - if n != v_1_0_0_0_0.Args[0] { - break - } - v_1_0_0_0_0_1 := v_1_0_0_0_0.Args[1] - if v_1_0_0_0_0_1.Op != OpConst64 || v_1_0_0_0_0_1.Type != typ.UInt64 || v_1_0_0_0_0_1.AuxInt != 7 { - break - } - v_1_0_0_0_1 := v_1_0_0_0.Args[1] - if v_1_0_0_0_1.Op != OpConst64 || v_1_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_1_0_0_0_1.AuxInt - if n != v_1_0_0.Args[1] { - break - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - break - } - k := v_1_0_1.AuxInt - v_1_1 := v_1.Args[1] - if 
v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || v_1_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Neq8 (And8 n (Const8 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh8x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh8x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd8 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - if n != v_0_0_0.Args[0] { - break - } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpRsh8Ux64 || v_0_0_0_1.Type != t { - break - } - _ = v_0_0_0_1.Args[1] - v_0_0_0_1_0 := v_0_0_0_1.Args[0] - if v_0_0_0_1_0.Op != OpRsh8x64 || v_0_0_0_1_0.Type != t { - break - } - _ = v_0_0_0_1_0.Args[1] - if n != v_0_0_0_1_0.Args[0] { - break - } - v_0_0_0_1_0_1 := v_0_0_0_1_0.Args[1] - if v_0_0_0_1_0_1.Op != OpConst64 || v_0_0_0_1_0_1.Type != typ.UInt64 || v_0_0_0_1_0_1.AuxInt != 7 { - break - } - v_0_0_0_1_1 := v_0_0_0_1.Args[1] - if v_0_0_0_1_1.Op != OpConst64 || v_0_0_0_1_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_1_1.AuxInt - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar])) n) (Const64 [k])) (Const64 [k])) n) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Neq8 (And8 n (Const8 [int64(1< [0])) - for { - n := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLsh8x64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRsh8x64 { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAdd8 { - break - } - t := v_0_0_0.Type - _ = v_0_0_0.Args[1] - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpRsh8Ux64 || v_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0.Args[1] - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpRsh8x64 || v_0_0_0_0_0.Type != t { - break - } - _ = v_0_0_0_0_0.Args[1] - if n != v_0_0_0_0_0.Args[0] { - break - } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_0_1.Type != typ.UInt64 || v_0_0_0_0_0_1.AuxInt != 7 { - break - } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpConst64 || v_0_0_0_0_1.Type != typ.UInt64 { - break - } - kbar := v_0_0_0_0_1.AuxInt - if n != v_0_0_0.Args[1] { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 || v_0_0_1.Type != typ.UInt64 { - break - } - k := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.Type != typ.UInt64 || v_0_1.AuxInt != k || !(k > 0 && k < 7 && kbar == 8-k) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int64(1< x (Const8 [y])) (Const8 [y])) // cond: isPowerOfTwo(y) // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != 
OpAnd8 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst8 || v_0_1.Type != t { + continue + } + y := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } } - t := v_0.Type - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 || v_0_1.Type != t { - break - } - y := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq8 (And8 (Const8 [y]) x) (Const8 [y])) - // cond: isPowerOfTwo(y) - // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - t := v_0.Type - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 || v_0_0.Type != t { - break - } - y := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq8 (Const8 [y]) (And8 x (Const8 [y]))) - // cond: isPowerOfTwo(y) - // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 || v_1.Type != t { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t || v_1_1.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true - } - // match: (Neq8 (Const8 [y]) (And8 (Const8 [y]) x)) - // cond: isPowerOfTwo(y) - // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - y := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 || v_1.Type != t { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t || v_1_0.AuxInt != y || !(isPowerOfTwo(y)) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = y - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = 0 - v.AddArg(v2) - return true + break } return false } @@ -36244,127 +15968,77 @@ func rewriteValuegeneric_OpNeqB_0(v *Value) bool { // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != 
OpConstBool { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConstBool { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (NeqB (ConstBool [d]) (ConstBool [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } // match: (NeqB (ConstBool [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (NeqB x (ConstBool [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstBool || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstBool || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (NeqB (ConstBool [1]) x) // result: (Not x) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstBool || v_0.AuxInt != 1 { - break - } - v.reset(OpNot) - v.AddArg(x) - return true - } - // match: (NeqB x (ConstBool [1])) - // result: (Not x) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstBool || v_1.AuxInt != 1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstBool || v_0.AuxInt != 1 { + continue + } + x := v.Args[1^_i0] + v.reset(OpNot) + v.AddArg(x) + return true } - v.reset(OpNot) - v.AddArg(x) - return true + break } // match: (NeqB (Not x) (Not y)) // result: (NeqB x y) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpNot { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpNot { + continue + } + x := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpNot { + continue + } + y := v_1.Args[0] + v.reset(OpNeqB) + v.AddArg(x) + v.AddArg(y) + return true } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpNot { - break - } - y := v_1.Args[0] - v.reset(OpNeqB) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (NeqB (Not y) (Not x)) - // result: (NeqB x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpNot { - break - } - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpNot { - break - } - x := v_1.Args[0] - v.reset(OpNeqB) - v.AddArg(x) - v.AddArg(y) - return true + break } return false } @@ -36402,735 +16076,423 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // result: (ConstBool [b2i(a != b)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAddr { + continue + } + a := v_0.Aux + v_1 := v.Args[1^_i0] + if v_1.Op != OpAddr { + continue + } + b := v_1.Aux + v.reset(OpConstBool) + v.AuxInt = b2i(a != b) + return true } - a := v_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - b := v_1.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a != b) - return true - } - // match: (NeqPtr (Addr {b} _) (Addr {a} _)) - // result: (ConstBool [b2i(a != b)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if 
v_0.Op != OpAddr { - break - } - b := v_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - a := v_1.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a != b) - return true + break } // match: (NeqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) // result: (ConstBool [b2i(a != b || o != 0)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAddr { + continue + } + a := v_0.Aux + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + b := v_1_0.Aux + v.reset(OpConstBool) + v.AuxInt = b2i(a != b || o != 0) + return true } - a := v_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - b := v_1_0.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o != 0) - return true - } - // match: (NeqPtr (OffPtr [o] (Addr {b} _)) (Addr {a} _)) - // result: (ConstBool [b2i(a != b || o != 0)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - b := v_0_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - a := v_1.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o != 0) - return true + break } // match: (NeqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) // result: (ConstBool [b2i(a != b || o1 != o2)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + continue + } + a := v_0_0.Aux + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o2 := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + b := v_1_0.Aux + v.reset(OpConstBool) + v.AuxInt = b2i(a != b || o1 != o2) + return true } - o1 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - a := v_0_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o2 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - b := v_1_0.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o1 != o2) - return true - } - // match: (NeqPtr (OffPtr [o2] (Addr {b} _)) (OffPtr [o1] (Addr {a} _))) - // result: (ConstBool [b2i(a != b || o1 != o2)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o2 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - b := v_0_0.Aux - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o1 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - a := v_1_0.Aux - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o1 != o2) - return true + break } // match: (NeqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) // result: (ConstBool [b2i(a != b)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + a := v_0.Aux + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpLocalAddr { + continue + } + b := v_1.Aux + _ = v_1.Args[1] + v.reset(OpConstBool) + v.AuxInt = b2i(a != b) + return true } - a := v_0.Aux - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - b := v_1.Aux - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a != b) - return true - } 
- // match: (NeqPtr (LocalAddr {b} _ _) (LocalAddr {a} _ _)) - // result: (ConstBool [b2i(a != b)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break - } - b := v_0.Aux - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - a := v_1.Aux - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a != b) - return true + break } // match: (NeqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a != b || o != 0)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + a := v_0.Aux + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + b := v_1_0.Aux + _ = v_1_0.Args[1] + v.reset(OpConstBool) + v.AuxInt = b2i(a != b || o != 0) + return true } - a := v_0.Aux - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - b := v_1_0.Aux - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o != 0) - return true - } - return false -} -func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { - // match: (NeqPtr (OffPtr [o] (LocalAddr {b} _ _)) (LocalAddr {a} _ _)) - // result: (ConstBool [b2i(a != b || o != 0)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - b := v_0_0.Aux - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - a := v_1.Aux - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o != 0) - return true + break } // match: (NeqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a != b || o1 != o2)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + a := v_0_0.Aux + _ = v_0_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o2 := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + b := v_1_0.Aux + _ = v_1_0.Args[1] + v.reset(OpConstBool) + v.AuxInt = b2i(a != b || o1 != o2) + return true } - o1 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - a := v_0_0.Aux - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o2 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - b := v_1_0.Aux - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o1 != o2) - return true - } - // match: (NeqPtr (OffPtr [o2] (LocalAddr {b} _ _)) (OffPtr [o1] (LocalAddr {a} _ _))) - // result: (ConstBool [b2i(a != b || o1 != o2)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o2 := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - b := v_0_0.Aux - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o1 := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - a := v_1_0.Aux - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = b2i(a != b || o1 != o2) - return true + break } // match: (NeqPtr (OffPtr [o1] p1) p2) // 
cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 != 0)]) - for { - p2 := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o1 := v_0.AuxInt - p1 := v_0.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 != 0) - return true - } - // match: (NeqPtr p2 (OffPtr [o1] p1)) - // cond: isSamePtr(p1, p2) - // result: (ConstBool [b2i(o1 != 0)]) for { _ = v.Args[1] - p2 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + p1 := v_0.Args[0] + p2 := v.Args[1^_i0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = b2i(o1 != 0) + return true } - o1 := v_1.AuxInt - p1 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 != 0) - return true + break } // match: (NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 != o2)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + o1 := v_0.AuxInt + p1 := v_0.Args[0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + o2 := v_1.AuxInt + p2 := v_1.Args[0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = b2i(o1 != o2) + return true } - o1 := v_0.AuxInt - p1 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o2 := v_1.AuxInt - p2 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 != o2) - return true - } - // match: (NeqPtr (OffPtr [o2] p2) (OffPtr [o1] p1)) - // cond: isSamePtr(p1, p2) - // result: (ConstBool [b2i(o1 != o2)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o2 := v_0.AuxInt - p2 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - o1 := v_1.AuxInt - p1 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(o1 != o2) - return true + break } // match: (NeqPtr (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (NeqPtr (Const32 [d]) (Const32 [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } + return false +} +func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c != d)]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true } 
- c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - return false -} -func rewriteValuegeneric_OpNeqPtr_20(v *Value) bool { - // match: (NeqPtr (Const64 [d]) (Const64 [c])) - // result: (ConstBool [b2i(c != d)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true + break } // match: (NeqPtr (LocalAddr _ _) (Addr _)) // result: (ConstBool [1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (NeqPtr (Addr _) (LocalAddr _ _)) - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) // result: (ConstBool [1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + _ = v_0_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (NeqPtr (Addr _) (OffPtr (LocalAddr _ _))) - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) // result: (ConstBool [1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpLocalAddr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpLocalAddr { + continue + } + _ = v_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - _ = v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (NeqPtr (OffPtr (Addr _)) (LocalAddr _ _)) - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpLocalAddr { - break - } - _ = v_1.Args[1] - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) // result: (ConstBool [1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpOffPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + _ = v_0_0.Args[1] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = 1 + return true } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLocalAddr { - break - } - _ = v_0_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAddr { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (NeqPtr (OffPtr (Addr _)) (OffPtr (LocalAddr _ _))) - // result: (ConstBool [1]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAddr { - break - } - v_1 := v.Args[1] - if v_1.Op != OpOffPtr { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpLocalAddr { - break - } - _ = v_1_0.Args[1] - v.reset(OpConstBool) - v.AuxInt = 1 - return true + break } // match: (NeqPtr (AddPtr p1 o1) p2) // cond: isSamePtr(p1, p2) // result: (IsNonNil o1) - for { - p2 := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAddPtr { - break - } - o1 := v_0.Args[1] - p1 := v_0.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpIsNonNil) - v.AddArg(o1) - return true - } - return false -} -func rewriteValuegeneric_OpNeqPtr_30(v *Value) bool { - // match: (NeqPtr p2 (AddPtr p1 o1)) - // cond: isSamePtr(p1, p2) - // result: (IsNonNil o1) for { _ = v.Args[1] - p2 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAddPtr { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAddPtr { + continue + } + o1 := v_0.Args[1] + p1 := v_0.Args[0] + p2 := v.Args[1^_i0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpIsNonNil) + v.AddArg(o1) + return true } - o1 := v_1.Args[1] - p1 := v_1.Args[0] - if !(isSamePtr(p1, p2)) { - break - } - v.reset(OpIsNonNil) - v.AddArg(o1) - return true + break } // match: (NeqPtr (Const32 [0]) p) // result: (IsNonNil p) - for { - p := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 0 { - break - } - v.reset(OpIsNonNil) - v.AddArg(p) - return true - } - // match: (NeqPtr p (Const32 [0])) - // result: (IsNonNil p) for { _ = v.Args[1] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 0 { + continue + } + p := v.Args[1^_i0] + v.reset(OpIsNonNil) + v.AddArg(p) + return true } - v.reset(OpIsNonNil) - v.AddArg(p) - return true + break } // match: (NeqPtr (Const64 [0]) p) // result: (IsNonNil p) - for { - p := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break - } - v.reset(OpIsNonNil) - v.AddArg(p) - return true - } - // match: (NeqPtr p (Const64 [0])) - // result: (IsNonNil p) for { _ = v.Args[1] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + p := v.Args[1^_i0] + v.reset(OpIsNonNil) + v.AddArg(p) + return true } - v.reset(OpIsNonNil) - v.AddArg(p) - return true + break } // match: (NeqPtr (ConstNil) p) // result: (IsNonNil p) - for { - p := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConstNil { - break - } - v.reset(OpIsNonNil) - v.AddArg(p) - return true - } - // match: (NeqPtr p (ConstNil)) - // 
result: (IsNonNil p) for { _ = v.Args[1] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstNil { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConstNil { + continue + } + p := v.Args[1^_i0] + v.reset(OpIsNonNil) + v.AddArg(p) + return true } - v.reset(OpIsNonNil) - v.AddArg(p) - return true + break } return false } @@ -37969,41 +17331,27 @@ func rewriteValuegeneric_OpOffPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpOr16_0(v *Value) bool { + b := v.Block // match: (Or16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c|d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + v.reset(OpConst16) + v.AuxInt = int64(int16(c | d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c | d)) - return true - } - // match: (Or16 (Const16 [d]) (Const16 [c])) - // result: (Const16 [int64(int16(c|d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c | d)) - return true + break } // match: (Or16 x x) // result: x @@ -38019,517 +17367,187 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { } // match: (Or16 (Const16 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or16 x (Const16 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Or16 (Const16 [-1]) _) // result: (Const16 [-1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != -1 { + continue + } + v.reset(OpConst16) + v.AuxInt = -1 + return true } - v.reset(OpConst16) - v.AuxInt = -1 - return true - } - // match: (Or16 _ (Const16 [-1])) - // result: (Const16 [-1]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != -1 { - break - } - v.reset(OpConst16) - v.AuxInt = -1 - return true + break } // match: (Or16 x (Or16 x y)) // result: (Or16 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or16 x (Or16 y x)) - // result: (Or16 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - _ = 
v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or16 (Or16 x y) x) - // result: (Or16 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValuegeneric_OpOr16_10(v *Value) bool { - b := v.Block - // match: (Or16 (Or16 y x) x) - // result: (Or16 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (Or16 (And16 x (Const16 [c2])) (Const16 [c1])) // cond: ^(c1 | c2) == 0 // result: (Or16 (Const16 [c1]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd16 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst16 { + continue + } + c2 := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + t := v_1.Type + c1 := v_1.AuxInt + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = c1 + v.AddArg(v0) + v.AddArg(x) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - c2 := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (And16 (Const16 [c2]) x) (Const16 [c1])) - // cond: ^(c1 | c2) == 0 - // result: (Or16 (Const16 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - c2 := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Const16 [c1]) (And16 x (Const16 [c2]))) - // cond: ^(c1 | c2) == 0 - // result: (Or16 (Const16 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - c2 := v_1_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Const16 [c1]) (And16 (Const16 [c2]) x)) - // cond: ^(c1 | c2) == 0 - // result: (Or16 (Const16 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - c2 := v_1_0.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true + break } // 
match: (Or16 (Or16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Or16 i (Or16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 (Or16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 x (Or16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOr16 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpOr16) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpOr16, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 x (Or16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Or16 (Const16 [c]) (Or16 (Const16 [d]) x)) // result: (Or16 (Const16 [int64(int16(c|d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c | d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} 
-func rewriteValuegeneric_OpOr16_20(v *Value) bool { - b := v.Block - // match: (Or16 (Const16 [c]) (Or16 x (Const16 [d]))) - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Or16 (Const16 [d]) x) (Const16 [c])) - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Or16 x (Const16 [d])) (Const16 [c])) - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } func rewriteValuegeneric_OpOr32_0(v *Value) bool { + b := v.Block // match: (Or32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c|d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(c | d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c | d)) - return true - } - // match: (Or32 (Const32 [d]) (Const32 [c])) - // result: (Const32 [int64(int32(c|d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c | d)) - return true + break } // match: (Or32 x x) // result: x @@ -38545,517 +17563,187 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { } // match: (Or32 (Const32 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or32 x (Const32 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Or32 (Const32 [-1]) _) 
// result: (Const32 [-1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != -1 { + continue + } + v.reset(OpConst32) + v.AuxInt = -1 + return true } - v.reset(OpConst32) - v.AuxInt = -1 - return true - } - // match: (Or32 _ (Const32 [-1])) - // result: (Const32 [-1]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != -1 { - break - } - v.reset(OpConst32) - v.AuxInt = -1 - return true + break } // match: (Or32 x (Or32 x y)) // result: (Or32 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpOr32) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or32 x (Or32 y x)) - // result: (Or32 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or32 (Or32 x y) x) - // result: (Or32 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValuegeneric_OpOr32_10(v *Value) bool { - b := v.Block - // match: (Or32 (Or32 y x) x) - // result: (Or32 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (Or32 (And32 x (Const32 [c2])) (Const32 [c1])) // cond: ^(c1 | c2) == 0 // result: (Or32 (Const32 [c1]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd32 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst32 { + continue + } + c2 := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + t := v_1.Type + c1 := v_1.AuxInt + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = c1 + v.AddArg(v0) + v.AddArg(x) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - c2 := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or32 (And32 (Const32 [c2]) x) (Const32 [c1])) - // cond: ^(c1 | c2) == 0 - // result: (Or32 (Const32 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - c2 := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { 
- break - } - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or32 (Const32 [c1]) (And32 x (Const32 [c2]))) - // cond: ^(c1 | c2) == 0 - // result: (Or32 (Const32 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - c2 := v_1_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or32 (Const32 [c1]) (And32 (Const32 [c2]) x)) - // cond: ^(c1 | c2) == 0 - // result: (Or32 (Const32 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - c2 := v_1_0.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Or32 (Or32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Or32 i (Or32 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or32 (Or32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or32 x (Or32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOr32 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpOr32) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpOr32, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or32 x (Or32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { - break - } 
- t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Or32 (Const32 [c]) (Or32 (Const32 [d]) x)) // result: (Or32 (Const32 [int64(int32(c|d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c | d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpOr32_20(v *Value) bool { - b := v.Block - // match: (Or32 (Const32 [c]) (Or32 x (Const32 [d]))) - // result: (Or32 (Const32 [int64(int32(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or32 (Or32 (Const32 [d]) x) (Const32 [c])) - // result: (Or32 (Const32 [int64(int32(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or32 (Or32 x (Const32 [d])) (Const32 [c])) - // result: (Or32 (Const32 [int64(int32(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } func rewriteValuegeneric_OpOr64_0(v *Value) bool { + b := v.Block // match: (Or64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c|d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConst64) + v.AuxInt = c | d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 
{ - break - } - d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c | d - return true - } - // match: (Or64 (Const64 [d]) (Const64 [c])) - // result: (Const64 [c|d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c | d - return true + break } // match: (Or64 x x) // result: x @@ -39071,517 +17759,187 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { } // match: (Or64 (Const64 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or64 x (Const64 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Or64 (Const64 [-1]) _) // result: (Const64 [-1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != -1 { + continue + } + v.reset(OpConst64) + v.AuxInt = -1 + return true } - v.reset(OpConst64) - v.AuxInt = -1 - return true - } - // match: (Or64 _ (Const64 [-1])) - // result: (Const64 [-1]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != -1 { - break - } - v.reset(OpConst64) - v.AuxInt = -1 - return true + break } // match: (Or64 x (Or64 x y)) // result: (Or64 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpOr64) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpOr64) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or64 x (Or64 y x)) - // result: (Or64 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpOr64) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or64 (Or64 x y) x) - // result: (Or64 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpOr64) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValuegeneric_OpOr64_10(v *Value) bool { - b := v.Block - // match: (Or64 (Or64 y x) x) - // result: (Or64 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpOr64) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (Or64 (And64 x (Const64 [c2])) (Const64 [c1])) // cond: ^(c1 | c2) == 0 // result: (Or64 (Const64 [c1]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd64 { + continue + } + _ = v_0.Args[1] + for 
_i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst64 { + continue + } + c2 := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + t := v_1.Type + c1 := v_1.AuxInt + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c1 + v.AddArg(v0) + v.AddArg(x) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - c2 := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (And64 (Const64 [c2]) x) (Const64 [c1])) - // cond: ^(c1 | c2) == 0 - // result: (Or64 (Const64 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - c2 := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (Const64 [c1]) (And64 x (Const64 [c2]))) - // cond: ^(c1 | c2) == 0 - // result: (Or64 (Const64 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - c2 := v_1_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (Const64 [c1]) (And64 (Const64 [c2]) x)) - // cond: ^(c1 | c2) == 0 - // result: (Or64 (Const64 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - c2 := v_1_0.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Or64 (Or64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Or64 i (Or64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or64 (Or64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or64 x (Or64 i:(Const64 ) z)) - // cond: (z.Op != 
OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOr64 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpOr64) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpOr64, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or64 x (Or64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) // result: (Or64 (Const64 [c|d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c | d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpOr64_20(v *Value) bool { - b := v.Block - // match: (Or64 (Const64 [c]) (Or64 x (Const64 [d]))) - // result: (Or64 (Const64 [c|d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (Or64 (Const64 [d]) x) (Const64 [c])) - // result: (Or64 (Const64 [c|d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - 
return true - } - // match: (Or64 (Or64 x (Const64 [d])) (Const64 [c])) - // result: (Or64 (Const64 [c|d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } func rewriteValuegeneric_OpOr8_0(v *Value) bool { + b := v.Block // match: (Or8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c|d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + v.reset(OpConst8) + v.AuxInt = int64(int8(c | d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c | d)) - return true - } - // match: (Or8 (Const8 [d]) (Const8 [c])) - // result: (Const8 [int64(int8(c|d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c | d)) - return true + break } // match: (Or8 x x) // result: x @@ -39597,478 +17955,162 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { } // match: (Or8 (Const8 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or8 x (Const8 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Or8 (Const8 [-1]) _) // result: (Const8 [-1]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != -1 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != -1 { + continue + } + v.reset(OpConst8) + v.AuxInt = -1 + return true } - v.reset(OpConst8) - v.AuxInt = -1 - return true - } - // match: (Or8 _ (Const8 [-1])) - // result: (Const8 [-1]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != -1 { - break - } - v.reset(OpConst8) - v.AuxInt = -1 - return true + break } // match: (Or8 x (Or8 x y)) // result: (Or8 x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpOr8) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or8 x (Or8 y x)) - // result: (Or8 x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := 
v.Args[1] - if v_1.Op != OpOr8 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpOr8) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or8 (Or8 x y) x) - // result: (Or8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr8 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpOr8) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValuegeneric_OpOr8_10(v *Value) bool { - b := v.Block - // match: (Or8 (Or8 y x) x) - // result: (Or8 x y) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr8 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpOr8) - v.AddArg(x) - v.AddArg(y) - return true + break } // match: (Or8 (And8 x (Const8 [c2])) (Const8 [c1])) // cond: ^(c1 | c2) == 0 // result: (Or8 (Const8 [c1]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpAnd8 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + x := v_0.Args[_i1] + v_0_1 := v_0.Args[1^_i1] + if v_0_1.Op != OpConst8 { + continue + } + c2 := v_0_1.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + t := v_1.Type + c1 := v_1.AuxInt + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c1 + v.AddArg(v0) + v.AddArg(x) + return true + } } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c2 := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or8 (And8 (Const8 [c2]) x) (Const8 [c1])) - // cond: ^(c1 | c2) == 0 - // result: (Or8 (Const8 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - c2 := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - t := v_1.Type - c1 := v_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or8 (Const8 [c1]) (And8 x (Const8 [c2]))) - // cond: ^(c1 | c2) == 0 - // result: (Or8 (Const8 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - c2 := v_1_1.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or8 (Const8 [c1]) (And8 (Const8 [c2]) x)) - // cond: ^(c1 | c2) == 0 - // result: (Or8 (Const8 [c1]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c1 := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - c2 := v_1_0.AuxInt - if !(^(c1 | c2) == 0) { - break - } - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = c1 - v.AddArg(v0) - v.AddArg(x) - return true + break } // match: (Or8 
(Or8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Or8 i (Or8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr8 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or8 (Or8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr8 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or8 x (Or8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpOr8 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpOr8) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpOr8, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or8 x (Or8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) // result: (Or8 (Const8 [int64(int8(c|d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpOr8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c | d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpOr8_20(v *Value) bool { - b := v.Block - // match: (Or8 (Const8 
[c]) (Or8 x (Const8 [d]))) - // result: (Or8 (Const8 [int64(int8(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or8 (Or8 (Const8 [d]) x) (Const8 [c])) - // result: (Or8 (Const8 [int64(int8(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or8 (Or8 x (Const8 [d])) (Const8 [c])) - // result: (Or8 (Const8 [int64(int8(c|d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpOr8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -42106,7 +20148,7 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7])) + // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7] )) // result: (Rsh8Ux64 x (Const64 [7] )) for { _ = v.Args[1] @@ -44294,106 +22336,30 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { if v_0.Op != OpMul16 { break } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + z := v_1.Args[1^_i1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub16 (Mul16 y x) (Mul16 x z)) - // result: (Mul16 x (Sub16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub16 (Mul16 x y) (Mul16 z x)) - // result: (Mul16 x (Sub16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub16, t) - 
v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub16 (Mul16 y x) (Mul16 z x)) - // result: (Mul16 x (Sub16 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } // match: (Sub16 x x) // result: (Const16 [0]) @@ -44408,23 +22374,6 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { } // match: (Sub16 (Add16 x y) x) // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Sub16 (Add16 y x) x) - // result: y for { x := v.Args[1] v_0 := v.Args[0] @@ -44432,14 +22381,17 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { break } _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + if x != v_0.Args[_i0] { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Sub16 (Add16 x y) y) // result: x @@ -44450,35 +22402,17 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - if y != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + if y != v_0.Args[1^_i0] { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpSub16_10(v *Value) bool { - b := v.Block - // match: (Sub16 (Add16 y x) y) - // result: x - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Sub16 x (Sub16 i:(Const16 ) z)) // cond: (z.Op != OpConst16 && x.Op != OpConst16) @@ -44643,106 +22577,30 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { if v_0.Op != OpMul32 { break } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + z := v_1.Args[1^_i1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul32) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub32 (Mul32 y x) (Mul32 x z)) - // result: (Mul32 x (Sub32 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul32) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub32 (Mul32 x y) 
(Mul32 z x)) - // result: (Mul32 x (Sub32 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul32) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub32 (Mul32 y x) (Mul32 z x)) - // result: (Mul32 x (Sub32 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul32) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } // match: (Sub32 x x) // result: (Const32 [0]) @@ -44757,23 +22615,6 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { } // match: (Sub32 (Add32 x y) x) // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Sub32 (Add32 y x) x) - // result: y for { x := v.Args[1] v_0 := v.Args[0] @@ -44781,14 +22622,17 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { break } _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + if x != v_0.Args[_i0] { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Sub32 (Add32 x y) y) // result: x @@ -44799,35 +22643,17 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - if y != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + if y != v_0.Args[1^_i0] { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpSub32_10(v *Value) bool { - b := v.Block - // match: (Sub32 (Add32 y x) y) - // result: x - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - x := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Sub32 x (Sub32 i:(Const32 ) z)) // cond: (z.Op != OpConst32 && x.Op != OpConst32) @@ -45013,106 +22839,30 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { if v_0.Op != OpMul64 { break } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + z := v_1.Args[1^_i1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub64 (Mul64 y x) (Mul64 x z)) - // result: (Mul64 x (Sub64 y z)) - for { - t := v.Type - _ = 
v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub64 (Mul64 x y) (Mul64 z x)) - // result: (Mul64 x (Sub64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub64 (Mul64 y x) (Mul64 z x)) - // result: (Mul64 x (Sub64 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } // match: (Sub64 x x) // result: (Const64 [0]) @@ -45127,23 +22877,6 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { } // match: (Sub64 (Add64 x y) x) // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Sub64 (Add64 y x) x) - // result: y for { x := v.Args[1] v_0 := v.Args[0] @@ -45151,14 +22884,17 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { break } _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + if x != v_0.Args[_i0] { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Sub64 (Add64 x y) y) // result: x @@ -45169,35 +22905,17 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - if y != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + if y != v_0.Args[1^_i0] { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpSub64_10(v *Value) bool { - b := v.Block - // match: (Sub64 (Add64 y x) y) - // result: x - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Sub64 x (Sub64 i:(Const64 ) z)) // cond: (z.Op != OpConst64 && x.Op != OpConst64) @@ -45383,106 +23101,30 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { if v_0.Op != OpMul8 { break } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break + _ = v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + y := v_0.Args[1^_i0] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + z := v_1.Args[1^_i1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, 
OpSub8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub8 (Mul8 y x) (Mul8 x z)) - // result: (Mul8 x (Sub8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - z := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub8 (Mul8 x y) (Mul8 z x)) - // result: (Mul8 x (Sub8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Sub8 (Mul8 y x) (Mul8 z x)) - // result: (Mul8 x (Sub8 y z)) - for { - t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[1] - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpMul8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true + break } // match: (Sub8 x x) // result: (Const8 [0]) @@ -45497,23 +23139,6 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { } // match: (Sub8 (Add8 x y) x) // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Sub8 (Add8 y x) x) - // result: y for { x := v.Args[1] v_0 := v.Args[0] @@ -45521,14 +23146,17 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { break } _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + if x != v_0.Args[_i0] { + continue + } + y := v_0.Args[1^_i0] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Sub8 (Add8 x y) y) // result: x @@ -45539,35 +23167,17 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { break } _ = v_0.Args[1] - x := v_0.Args[0] - if y != v_0.Args[1] { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v_0.Args[_i0] + if y != v_0.Args[1^_i0] { + continue + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpSub8_10(v *Value) bool { - b := v.Block - // match: (Sub8 (Add8 y x) y) - // result: x - for { - y := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[1] - if y != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Sub8 x (Sub8 i:(Const8 ) z)) // cond: (z.Op != OpConst8 && x.Op != OpConst8) @@ -45723,45 +23333,27 @@ func rewriteValuegeneric_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 (And16 (Const16 [y]) x)) // cond: y&0xFF == 0xFF // result: (Trunc16to8 x) - for { - v_0 := 
v.Args[0] - if v_0.Op != OpAnd16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - y := v_0_0.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc16to8) - v.AddArg(x) - return true - } - // match: (Trunc16to8 (And16 x (Const16 [y]))) - // cond: y&0xFF == 0xFF - // result: (Trunc16to8 x) for { v_0 := v.Args[0] if v_0.Op != OpAnd16 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst16 { + continue + } + y := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc16to8) + v.AddArg(x) + return true } - y := v_0_1.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc16to8) - v.AddArg(x) - return true + break } return false } @@ -45831,45 +23423,27 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { // match: (Trunc32to16 (And32 (Const32 [y]) x)) // cond: y&0xFFFF == 0xFFFF // result: (Trunc32to16 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - y := v_0_0.AuxInt - if !(y&0xFFFF == 0xFFFF) { - break - } - v.reset(OpTrunc32to16) - v.AddArg(x) - return true - } - // match: (Trunc32to16 (And32 x (Const32 [y]))) - // cond: y&0xFFFF == 0xFFFF - // result: (Trunc32to16 x) for { v_0 := v.Args[0] if v_0.Op != OpAnd32 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst32 { + continue + } + y := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(y&0xFFFF == 0xFFFF) { + continue + } + v.reset(OpTrunc32to16) + v.AddArg(x) + return true } - y := v_0_1.AuxInt - if !(y&0xFFFF == 0xFFFF) { - break - } - v.reset(OpTrunc32to16) - v.AddArg(x) - return true + break } return false } @@ -45915,45 +23489,27 @@ func rewriteValuegeneric_OpTrunc32to8_0(v *Value) bool { // match: (Trunc32to8 (And32 (Const32 [y]) x)) // cond: y&0xFF == 0xFF // result: (Trunc32to8 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - y := v_0_0.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc32to8) - v.AddArg(x) - return true - } - // match: (Trunc32to8 (And32 x (Const32 [y]))) - // cond: y&0xFF == 0xFF - // result: (Trunc32to8 x) for { v_0 := v.Args[0] if v_0.Op != OpAnd32 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst32 { + continue + } + y := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc32to8) + v.AddArg(x) + return true } - y := v_0_1.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc32to8) - v.AddArg(x) - return true + break } return false } @@ -46023,45 +23579,27 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { // match: (Trunc64to16 (And64 (Const64 [y]) x)) // cond: y&0xFFFF == 0xFFFF // result: (Trunc64to16 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - y := v_0_0.AuxInt - if !(y&0xFFFF == 0xFFFF) { - break - } - v.reset(OpTrunc64to16) - v.AddArg(x) - return true - } - // match: (Trunc64to16 (And64 x (Const64 [y]))) - // cond: y&0xFFFF == 
0xFFFF - // result: (Trunc64to16 x) for { v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst64 { + continue + } + y := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(y&0xFFFF == 0xFFFF) { + continue + } + v.reset(OpTrunc64to16) + v.AddArg(x) + return true } - y := v_0_1.AuxInt - if !(y&0xFFFF == 0xFFFF) { - break - } - v.reset(OpTrunc64to16) - v.AddArg(x) - return true + break } return false } @@ -46155,45 +23693,27 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (And64 (Const64 [y]) x)) // cond: y&0xFFFFFFFF == 0xFFFFFFFF // result: (Trunc64to32 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - y := v_0_0.AuxInt - if !(y&0xFFFFFFFF == 0xFFFFFFFF) { - break - } - v.reset(OpTrunc64to32) - v.AddArg(x) - return true - } - // match: (Trunc64to32 (And64 x (Const64 [y]))) - // cond: y&0xFFFFFFFF == 0xFFFFFFFF - // result: (Trunc64to32 x) for { v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst64 { + continue + } + y := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(y&0xFFFFFFFF == 0xFFFFFFFF) { + continue + } + v.reset(OpTrunc64to32) + v.AddArg(x) + return true } - y := v_0_1.AuxInt - if !(y&0xFFFFFFFF == 0xFFFFFFFF) { - break - } - v.reset(OpTrunc64to32) - v.AddArg(x) - return true + break } return false } @@ -46239,45 +23759,27 @@ func rewriteValuegeneric_OpTrunc64to8_0(v *Value) bool { // match: (Trunc64to8 (And64 (Const64 [y]) x)) // cond: y&0xFF == 0xFF // result: (Trunc64to8 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - y := v_0_0.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc64to8) - v.AddArg(x) - return true - } - // match: (Trunc64to8 (And64 x (Const64 [y]))) - // cond: y&0xFF == 0xFF - // result: (Trunc64to8 x) for { v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0_0 := v_0.Args[_i0] + if v_0_0.Op != OpConst64 { + continue + } + y := v_0_0.AuxInt + x := v_0.Args[1^_i0] + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc64to8) + v.AddArg(x) + return true } - y := v_0_1.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc64to8) - v.AddArg(x) - return true + break } return false } @@ -46287,37 +23789,22 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { // result: (Const16 [int64(int16(c^d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst16 { + continue + } + d := v_1.AuxInt + v.reset(OpConst16) + v.AuxInt = int64(int16(c ^ d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c ^ d)) - return true - } - // match: (Xor16 (Const16 [d]) (Const16 [c])) - // result: (Const16 [int64(int16(c^d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 
{ - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c ^ d)) - return true + break } // match: (Xor16 x x) // result: (Const16 [0]) @@ -46332,324 +23819,110 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { } // match: (Xor16 (Const16 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Xor16 x (Const16 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Xor16 x (Xor16 x y)) // result: y for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor16 x (Xor16 y x)) - // result: y - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor16 (Xor16 x y) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor16 (Xor16 y x) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Xor16 (Xor16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Xor16 i (Xor16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpXor16_10(v *Value) bool { - b := v.Block - // match: (Xor16 (Xor16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor16 x (Xor16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: 
(Xor16 i (Xor16 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpXor16 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpXor16) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor16, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor16 x (Xor16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) // result: (Xor16 (Const16 [int64(int16(c^d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor16 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpXor16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c ^ d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor16 (Const16 [c]) (Xor16 x (Const16 [d]))) - // result: (Xor16 (Const16 [int64(int16(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor16 (Xor16 (Const16 [d]) x) (Const16 [c])) - // result: (Xor16 (Const16 [int64(int16(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - 
return true - } - // match: (Xor16 (Xor16 x (Const16 [d])) (Const16 [c])) - // result: (Xor16 (Const16 [int64(int16(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -46659,37 +23932,22 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { // result: (Const32 [int64(int32(c^d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst32 { + continue + } + d := v_1.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(c ^ d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c ^ d)) - return true - } - // match: (Xor32 (Const32 [d]) (Const32 [c])) - // result: (Const32 [int64(int32(c^d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c ^ d)) - return true + break } // match: (Xor32 x x) // result: (Const32 [0]) @@ -46704,324 +23962,110 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { } // match: (Xor32 (Const32 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Xor32 x (Const32 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Xor32 x (Xor32 x y)) // result: y for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor32 x (Xor32 y x)) - // result: y - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor32 (Xor32 x y) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor32 (Xor32 y x) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if 
v_0.Op != OpXor32 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Xor32 (Xor32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Xor32 i (Xor32 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpXor32_10(v *Value) bool { - b := v.Block - // match: (Xor32 (Xor32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor32 x (Xor32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpXor32 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpXor32) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor32, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor32 x (Xor32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) // result: (Xor32 (Const32 [int64(int32(c^d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor32 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpXor32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c ^ d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != 
OpXor32 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor32 (Const32 [c]) (Xor32 x (Const32 [d]))) - // result: (Xor32 (Const32 [int64(int32(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor32 (Xor32 (Const32 [d]) x) (Const32 [c])) - // result: (Xor32 (Const32 [int64(int32(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor32 (Xor32 x (Const32 [d])) (Const32 [c])) - // result: (Xor32 (Const32 [int64(int32(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -47031,37 +24075,22 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { // result: (Const64 [c^d]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst64 { + continue + } + d := v_1.AuxInt + v.reset(OpConst64) + v.AuxInt = c ^ d + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c ^ d - return true - } - // match: (Xor64 (Const64 [d]) (Const64 [c])) - // result: (Const64 [c^d]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c ^ d - return true + break } // match: (Xor64 x x) // result: (Const64 [0]) @@ -47076,324 +24105,110 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { } // match: (Xor64 (Const64 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Xor64 x (Const64 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = 
x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Xor64 x (Xor64 x y)) // result: y for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor64 x (Xor64 y x)) - // result: y - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor64 (Xor64 x y) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor64 (Xor64 y x) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Xor64 (Xor64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Xor64 i (Xor64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpXor64_10(v *Value) bool { - b := v.Block - // match: (Xor64 (Xor64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor64 x (Xor64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpXor64 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpXor64) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor64, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } 
- // match: (Xor64 x (Xor64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) // result: (Xor64 (Const64 [c^d]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor64 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpXor64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c ^ d + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor64 (Const64 [c]) (Xor64 x (Const64 [d]))) - // result: (Xor64 (Const64 [c^d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor64 (Xor64 (Const64 [d]) x) (Const64 [c])) - // result: (Xor64 (Const64 [c^d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor64 (Xor64 x (Const64 [d])) (Const64 [c])) - // result: (Xor64 (Const64 [c^d]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d - v.AddArg(v0) - v.AddArg(x) - return true + break } return false } @@ -47403,37 +24218,22 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { // result: (Const8 [int64(int8(c^d))]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpConst8 { + continue + } + d := v_1.AuxInt + 
v.reset(OpConst8) + v.AuxInt = int64(int8(c ^ d)) + return true } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c ^ d)) - return true - } - // match: (Xor8 (Const8 [d]) (Const8 [c])) - // result: (Const8 [int64(int8(c^d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c ^ d)) - return true + break } // match: (Xor8 x x) // result: (Const8 [0]) @@ -47448,324 +24248,110 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { } // match: (Xor8 (Const8 [0]) x) // result: x - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 || v_0.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Xor8 x (Const8 [0])) - // result: x for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.AuxInt != 0 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 || v_0.AuxInt != 0 { + continue + } + x := v.Args[1^_i0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true + break } // match: (Xor8 x (Xor8 x y)) // result: y for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + x := v.Args[_i0] + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + if x != v_1.Args[_i1] { + continue + } + y := v_1.Args[1^_i1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } } - y := v_1.Args[1] - if x != v_1.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor8 x (Xor8 y x)) - // result: y - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - _ = v_1.Args[1] - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor8 (Xor8 x y) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - y := v_0.Args[1] - if x != v_0.Args[0] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor8 (Xor8 y x) x) - // result: y - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - _ = v_0.Args[1] - y := v_0.Args[0] - if x != v_0.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true + break } // match: (Xor8 (Xor8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Xor8 i (Xor8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - z := v_0.Args[1] - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpXor8_10(v *Value) bool { - b := v.Block - // match: (Xor8 (Xor8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Xor8 i (Xor8 z x)) - for { - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - _ = v_0.Args[1] - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { - 
break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor8 x (Xor8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Xor8 i (Xor8 z x)) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpXor8 { + continue + } + _ = v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + i := v_0.Args[_i1] + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0.Args[1^_i1] + x := v.Args[1^_i0] + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpXor8) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor8, t) + v0.AddArg(z) + v0.AddArg(x) + v.AddArg(v0) + return true + } } - z := v_1.Args[1] - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor8 x (Xor8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Xor8 i (Xor8 z x)) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - _ = v_1.Args[1] - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true + break } // match: (Xor8 (Const8 [c]) (Xor8 (Const8 [d]) x)) // result: (Xor8 (Const8 [int64(int8(c^d))]) x) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break + for _i0 := 0; _i0 <= 1; _i0++ { + v_0 := v.Args[_i0] + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1^_i0] + if v_1.Op != OpXor8 { + continue + } + _ = v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1++ { + v_1_0 := v_1.Args[_i1] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := v_1_0.AuxInt + x := v_1.Args[1^_i1] + v.reset(OpXor8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c ^ d)) + v.AddArg(v0) + v.AddArg(x) + return true + } } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - x := v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 || v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor8 (Const8 [c]) (Xor8 x (Const8 [d]))) - // result: (Xor8 (Const8 [int64(int8(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - _ = v_1.Args[1] - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 || v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor8 (Xor8 (Const8 [d]) x) (Const8 [c])) - // result: (Xor8 (Const8 [int64(int8(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := 
v_0_0.Type - d := v_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor8 (Xor8 x (Const8 [d])) (Const8 [c])) - // result: (Xor8 (Const8 [int64(int8(c^d))]) x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 || v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true + break } return false }
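Note (illustrative, not part of the patch): every hunk above follows the same shape — the two hand-duplicated operand orderings of a commutative match are collapsed into a two-iteration loop over _i0, indexing the tried argument with Args[_i0] and the remaining one with Args[1^_i0], with `continue` where the old copies used `break` and a trailing `break` after the loop so a failed match still falls through to the next rule. The sketch below is a standalone approximation of that pattern; the `node` type, `matchXorConst` helper, and string op names are hypothetical stand-ins for the real *ssa.Value and Op constants, kept minimal so the snippet compiles on its own.

    // Standalone sketch of the commutative-argument matching loop emitted by
    // rulegen. Types and names here are illustrative stand-ins, not SSA APIs.
    package main

    import "fmt"

    type node struct {
    	op   string
    	aux  int64
    	args []*node
    }

    // matchXorConst mimics a generated matcher for a rule like
    // (Xor16 (Const16 [c]) (Const16 [d])) -> (Const16 [c^d]): both operand
    // orders are tried by the _i0 loop instead of two copies of the matcher.
    func matchXorConst(v *node) (int64, bool) {
    	if v.op != "Xor16" {
    		return 0, false
    	}
    	for _i0 := 0; _i0 <= 1; _i0++ {
    		v_0 := v.args[_i0]
    		if v_0.op != "Const16" {
    			continue // try the other operand order
    		}
    		c := v_0.aux
    		v_1 := v.args[1^_i0] // the argument not selected by _i0
    		if v_1.op != "Const16" {
    			continue
    		}
    		d := v_1.aux
    		return int64(int16(c ^ d)), true
    	}
    	return 0, false // both orders failed; caller moves on to the next rule
    }

    func main() {
    	v := &node{op: "Xor16", args: []*node{
    		{op: "Const16", aux: 0x00F0},
    		{op: "Const16", aux: 0x000F},
    	}}
    	if r, ok := matchXorConst(v); ok {
    		fmt.Printf("folded to Const16 [%#x]\n", r)
    	}
    }

The design choice visible throughout the generated hunks is the same as in this sketch: `continue` keeps the remaining orderings live after a failed sub-match, and the single `break` after the loop preserves the old fall-through behaviour to the next match block.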