diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index c39ded4f9c..b25f52a078 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -172,7 +172,7 @@ func genRulesSuffix(arch arch, suff string) {
 	genFile := &File{arch: arch, suffix: suff}
 	const chunkSize = 10
 	// Main rewrite routine is a switch on v.Op.
-	fn := &Func{kind: "Value"}
+	fn := &Func{kind: "Value", arglen: -1}
 	sw := &Switch{expr: exprf("v.Op")}
 	for _, op := range ops {
@@ -218,6 +218,7 @@ func genRulesSuffix(arch arch, suff string) {
 			fn := &Func{
 				kind:   "Value",
 				suffix: fmt.Sprintf("_%s_%d", op, chunk),
+				arglen: opByName(arch, op).argLength,
 			}
 			fn.add(declf("b", "v.Block"))
 			fn.add(declf("config", "b.Func.Config"))
@@ -229,7 +230,7 @@ func genRulesSuffix(arch arch, suff string) {
 			}
 			rr = &RuleRewrite{loc: rule.loc}
 			rr.match, rr.cond, rr.result = rule.parse()
-			pos, _ := genMatch(rr, arch, rr.match)
+			pos, _ := genMatch(rr, arch, rr.match, fn.arglen >= 0)
 			if pos == "" {
 				pos = "v.Pos"
 			}
@@ -593,6 +594,11 @@ func fprint(w io.Writer, n Node) {
 		f := f.(*Func)
 		fmt.Fprintf(w, "func rewrite%s%s%s%s(", f.kind, n.arch.name, n.suffix, f.suffix)
 		fmt.Fprintf(w, "%c *%s) bool {\n", strings.ToLower(f.kind)[0], f.kind)
+		if f.kind == "Value" && f.arglen > 0 {
+			for i := f.arglen - 1; i >= 0; i-- {
+				fmt.Fprintf(w, "v_%d := v.Args[%d]\n", i, i)
+			}
+		}
 		for _, n := range f.list {
 			fprint(w, n)
@@ -677,7 +683,7 @@ func fprint(w io.Writer, n Node) {
 			fmt.Fprintln(w)
 		}
 	case StartCommuteLoop:
-		fmt.Fprintf(w, "for _i%d := 0; _i%d <= 1; _i%d++ {\n", n.depth, n.depth, n.depth)
+		fmt.Fprintf(w, "for _i%[1]d := 0; _i%[1]d <= 1; _i%[1]d, %[2]s_0, %[2]s_1 = _i%[1]d + 1, %[2]s_1, %[2]s_0 {\n", n.depth, n.v)
 	default:
 		log.Fatalf("cannot print %T", n)
 	}
@@ -751,6 +757,7 @@ type (
 		bodyBase
 		kind   string // "Value" or "Block"
 		suffix string
+		arglen int32  // if kind == "Value", number of args for this op
 	}
 	Switch struct {
 		bodyBase // []*Case
@@ -779,6 +786,7 @@ type (
 	}
 	StartCommuteLoop struct {
 		depth int
+		v     string
 	}
 )
@@ -835,7 +843,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 		cname := fmt.Sprintf("b.Controls[%v]", i)
 		vname := fmt.Sprintf("v_%v", i)
 		rr.add(declf(vname, cname))
-		p, op := genMatch0(rr, arch, arg, vname, nil) // TODO: pass non-nil cnt?
+		p, op := genMatch0(rr, arch, arg, vname, nil, false) // TODO: pass non-nil cnt?
 		if op != "" {
 			check := fmt.Sprintf("%s.Op == %s", cname, op)
 			if rr.check == "" {
@@ -948,12 +956,12 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 // genMatch returns the variable whose source position should be used for the
 // result (or "" if no opinion), and a boolean that reports whether the match can fail.
-func genMatch(rr *RuleRewrite, arch arch, match string) (pos, checkOp string) {
+func genMatch(rr *RuleRewrite, arch arch, match string, pregenTop bool) (pos, checkOp string) {
 	cnt := varCount(rr.match, rr.cond)
-	return genMatch0(rr, arch, match, "v", cnt)
+	return genMatch0(rr, arch, match, "v", cnt, pregenTop)
 }
 
-func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int) (pos, checkOp string) {
+func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, pregenTop bool) (pos, checkOp string) {
 	if match[0] != '(' || match[len(match)-1] != ')' {
 		log.Fatalf("non-compound expr in genMatch0: %q", match)
 	}
@@ -1002,7 +1010,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int)
 	}
 
 	// Access last argument first to minimize bounds checks.
-	if n := len(args); n > 1 {
+	if n := len(args); n > 1 && !pregenTop {
 		a := args[n-1]
 		if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
 			rr.add(declf(a, "%s.Args[%d]", v, n-1))
@@ -1013,25 +1021,28 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int)
 			rr.add(stmtf("_ = %s.Args[%d]", v, n-1))
 		}
 	}
+	if commutative && !pregenTop {
+		for i := 0; i <= 1; i++ {
+			vname := fmt.Sprintf("%s_%d", v, i)
+			rr.add(declf(vname, "%s.Args[%d]", v, i))
+		}
+	}
 	var commuteDepth int
 	if commutative {
 		commuteDepth = rr.commuteDepth
-		rr.add(StartCommuteLoop{commuteDepth})
+		rr.add(StartCommuteLoop{commuteDepth, v})
 		rr.commuteDepth++
 	}
 	for i, arg := range args {
-		argidx := strconv.Itoa(i)
-		if commutative {
-			switch i {
-			case 0:
-				argidx = fmt.Sprintf("_i%d", commuteDepth)
-			case 1:
-				argidx = fmt.Sprintf("1^_i%d", commuteDepth)
-			}
-		}
 		if arg == "_" {
 			continue
 		}
+		var rhs string
+		if (commutative && i < 2) || pregenTop {
+			rhs = fmt.Sprintf("%s_%d", v, i)
+		} else {
+			rhs = fmt.Sprintf("%s.Args[%d]", v, i)
+		}
 		if !strings.Contains(arg, "(") {
 			// leaf variable
 			if rr.declared(arg) {
 				// the old definition and the new definition match.
 				// For example, (add x x). Equality is just pointer equality
 				// on Values (so cse is important to do before lowering).
-				rr.add(breakf("%s != %s.Args[%s]", arg, v, argidx))
+				rr.add(breakf("%s != %s", arg, rhs))
 			} else {
-				rr.add(declf(arg, "%s.Args[%s]", v, argidx))
+				if arg != rhs {
+					rr.add(declf(arg, "%s", rhs))
+				}
 			}
 			continue
 		}
@@ -1058,10 +1071,12 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int)
 			log.Fatalf("don't name args 'b', it is ambiguous with blocks")
 		}
-		rr.add(declf(argname, "%s.Args[%s]", v, argidx))
+		if argname != rhs {
+			rr.add(declf(argname, "%s", rhs))
+		}
 		bexpr := exprf("%s.Op != addLater", argname)
 		rr.add(&CondBreak{expr: bexpr})
-		argPos, argCheckOp := genMatch0(rr, arch, arg, argname, cnt)
+		argPos, argCheckOp := genMatch0(rr, arch, arg, argname, cnt, false)
 		bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp
 
 		if argPos != "" {
@@ -1546,3 +1561,22 @@ func isEllipsisValue(s string) bool {
 	}
 	return true
 }
+
+func opByName(arch arch, name string) opData {
+	name = name[2:]
+	for _, x := range genericOps {
+		if name == x.name {
+			return x
+		}
+	}
+	if arch.name != "generic" {
+		name = name[len(arch.name):]
+		for _, x := range arch.ops {
+			if name == x.name {
+				return x
+			}
+		}
+	}
+	log.Fatalf("failed to find op named %s in arch %s", name, arch.name)
+	panic("unreachable")
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 464e03ee41..35de8bcd91 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -708,17 +708,19 @@ func rewriteValue386(v *Value) bool {
 	return false
 }
 func rewriteValue386_Op386ADCL_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (ADCL x (MOVLconst [c]) f)
 	// result: (ADCLconst [c] x f)
 	for {
-		f := v.Args[2]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			x := v.Args[_i0]
-			v_1 := v.Args[1^_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
 			if v_1.Op != Op386MOVLconst {
 				continue
 			}
 			c := v_1.AuxInt
+			f := v_2
 			v.reset(Op386ADCLconst)
 			v.AuxInt = c
 			v.AddArg(x)
@@ -730,13 +732,13 @@ func rewriteValue386_Op386ADCL_0(v *Value) bool {
 	return false
 }
 func rewriteValue386_Op386ADDL_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (ADDL x (MOVLconst [c]))
 	// result: (ADDLconst [c] x)
 	for {
-		_ = v.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			x := v.Args[_i0]
-			v_1 := v.Args[1^_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
 			if v_1.Op != Op386MOVLconst {
 				continue
 			}
@@ -752,15 +754,12 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool {
 	// cond: d == 32-c
 	// result: (ROLLconst [c] x)
 	for {
-		_ = v.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0 := v.Args[_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != Op386SHLLconst {
 				continue
 			}
 			c := v_0.AuxInt
 			x := v_0.Args[0]
-			v_1 := v.Args[1^_i0]
 			if v_1.Op != Op386SHRLconst {
 				continue
 			}
@@ -780,15 +779,12 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool {
 	// result: (ROLWconst x [c])
 	for {
 		t := v.Type
-		_ = v.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0 := v.Args[_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != Op386SHLLconst {
 				continue
 			}
 			c := v_0.AuxInt
 			x := v_0.Args[0]
-			v_1 := v.Args[1^_i0]
 			if v_1.Op != Op386SHRWconst {
 				continue
 			}
@@ -808,15 +804,12 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool {
 	// result: (ROLBconst x [c])
 	for {
 		t := v.Type
-		_ = v.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0 := v.Args[_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != Op386SHLLconst {
 				continue
 			}
 			c := v_0.AuxInt
 			x :=
v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRBconst { continue } @@ -834,10 +827,8 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // match: (ADDL x (SHLLconst [3] y)) // result: (LEAL8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 { continue } @@ -852,10 +843,8 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // match: (ADDL x (SHLLconst [2] y)) // result: (LEAL4 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { continue } @@ -870,10 +859,8 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // match: (ADDL x (SHLLconst [1] y)) // result: (LEAL2 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { continue } @@ -888,10 +875,8 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // match: (ADDL x (ADDL y y)) // result: (LEAL2 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386ADDL { continue } @@ -909,19 +894,19 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // match: (ADDL x (ADDL x y)) // result: (LEAL2 y x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386ADDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(Op386LEAL2) v.AddArg(y) v.AddArg(x) @@ -933,15 +918,13 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { // match: (ADDL (ADDLconst [c] x) y) // result: (LEAL1 [c] x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(Op386LEAL1) v.AuxInt = c v.AddArg(x) @@ -953,14 +936,14 @@ func rewriteValue386_Op386ADDL_0(v *Value) bool { return false } func rewriteValue386_Op386ADDL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDL x (LEAL [c] {s} y)) // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAL1 [c] {s} x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386LEAL { continue } @@ -983,10 +966,9 @@ func rewriteValue386_Op386ADDL_10(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLload { continue } @@ -1011,10 +993,9 @@ func rewriteValue386_Op386ADDL_10(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: 
(ADDLloadidx4 x [off] {sym} ptr idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLloadidx4 { continue } @@ -1040,10 +1021,8 @@ func rewriteValue386_Op386ADDL_10(v *Value) bool { // match: (ADDL x (NEGL y)) // result: (SUBL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386NEGL { continue } @@ -1058,13 +1037,13 @@ func rewriteValue386_Op386ADDL_10(v *Value) bool { return false } func rewriteValue386_Op386ADDLcarry_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDLcarry x (MOVLconst [c])) // result: (ADDLconstcarry [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386MOVLconst { continue } @@ -1079,11 +1058,11 @@ func rewriteValue386_Op386ADDLcarry_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDLconst [c] (ADDL x y)) // result: (LEAL1 [c] x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } @@ -1100,7 +1079,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (LEAL [c+d] {s} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } @@ -1121,7 +1099,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (LEAL1 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -1144,7 +1121,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (LEAL2 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386LEAL2 { break } @@ -1167,7 +1143,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (LEAL4 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -1190,7 +1165,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (LEAL8 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386LEAL8 { break } @@ -1213,7 +1187,7 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -1226,7 +1200,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (MOVLconst [int64(int32(c+d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -1239,7 +1212,6 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { // result: (ADDLconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } @@ -1253,6 +1225,8 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLconstmodify_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) @@ -1261,13 +1235,12 @@ func rewriteValue386_Op386ADDLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -1284,14 +1257,13 @@ func rewriteValue386_Op386ADDLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := 
v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1305,6 +1277,9 @@ func rewriteValue386_Op386ADDLconstmodify_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) @@ -1313,14 +1288,13 @@ func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -1338,14 +1312,13 @@ func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2 * 4)) { break } @@ -1363,15 +1336,14 @@ func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1386,6 +1358,9 @@ func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -1394,14 +1369,13 @@ func rewriteValue386_Op386ADDLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -1419,15 +1393,14 @@ func rewriteValue386_Op386ADDLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1445,9 +1418,7 @@ func rewriteValue386_Op386ADDLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL4 { break } @@ -1455,6 +1426,7 @@ func rewriteValue386_Op386ADDLload_0(v *Value) bool { sym2 := v_1.Aux idx := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -1470,6 +1442,10 @@ func rewriteValue386_Op386ADDLload_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) @@ -1478,15 +1454,14 @@ func 
rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -1505,15 +1480,14 @@ func rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - base := v.Args[1] - v_2 := v.Args[2] + val := v_0 + base := v_1 if v_2.Op != Op386ADDLconst { break } off2 := v_2.AuxInt idx := v_2.Args[0] + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -1532,16 +1506,15 @@ func rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1557,6 +1530,9 @@ func rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) @@ -1565,14 +1541,13 @@ func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -1590,15 +1565,14 @@ func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1613,6 +1587,10 @@ func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { return false } func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) @@ -1621,15 +1599,14 @@ func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -1648,15 +1625,14 @@ func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -1675,16 +1651,15 @@ func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && 
(base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1703,14 +1678,13 @@ func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != Op386MOVLconst { break } c := v_2.AuxInt + mem := v_3 if !(validValAndOff(c, off)) { break } @@ -1725,16 +1699,17 @@ func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ADDSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVSDload { continue } @@ -1758,6 +1733,9 @@ func rewriteValue386_Op386ADDSD_0(v *Value) bool { return false } func rewriteValue386_Op386ADDSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -1766,14 +1744,13 @@ func rewriteValue386_Op386ADDSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -1791,15 +1768,14 @@ func rewriteValue386_Op386ADDSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1814,16 +1790,17 @@ func rewriteValue386_Op386ADDSDload_0(v *Value) bool { return false } func rewriteValue386_Op386ADDSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVSSload { continue } @@ -1847,6 +1824,9 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { return false } func rewriteValue386_Op386ADDSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -1855,14 +1835,13 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -1880,15 +1859,14 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -1903,13 +1881,13 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { return false } func rewriteValue386_Op386ANDL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ANDL x (MOVLconst [c])) // result: (ANDLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386MOVLconst { continue } @@ -1925,10 +1903,9 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLload { continue } @@ -1953,10 +1930,9 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLloadidx4 x [off] {sym} ptr idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLloadidx4 { continue } @@ -1982,8 +1958,8 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { // match: (ANDL x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -1994,11 +1970,11 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDLconst [c] (ANDLconst [d] x)) // result: (ANDLconst [c & d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -2026,7 +2002,7 @@ func rewriteValue386_Op386ANDLconst_0(v *Value) bool { // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == -1) { break } @@ -2039,7 +2015,6 @@ func rewriteValue386_Op386ANDLconst_0(v *Value) bool { // result: (MOVLconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2051,6 +2026,8 @@ func rewriteValue386_Op386ANDLconst_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLconstmodify_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) @@ -2059,13 +2036,12 @@ func rewriteValue386_Op386ANDLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -2082,14 +2058,13 @@ func rewriteValue386_Op386ANDLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -2103,6 +2078,9 @@ func rewriteValue386_Op386ANDLconstmodify_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ANDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) @@ -2111,14 +2089,13 @@ func 
rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -2136,14 +2113,13 @@ func rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2 * 4)) { break } @@ -2161,15 +2137,14 @@ func rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -2184,6 +2159,9 @@ func rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -2192,14 +2170,13 @@ func rewriteValue386_Op386ANDLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -2217,15 +2194,14 @@ func rewriteValue386_Op386ANDLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -2243,9 +2219,7 @@ func rewriteValue386_Op386ANDLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL4 { break } @@ -2253,6 +2227,7 @@ func rewriteValue386_Op386ANDLload_0(v *Value) bool { sym2 := v_1.Aux idx := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -2268,6 +2243,10 @@ func rewriteValue386_Op386ANDLload_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ANDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) @@ -2276,15 +2255,14 @@ func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -2303,15 +2281,14 @@ func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - base := v.Args[1] - v_2 := v.Args[2] + val := v_0 + base := v_1 if v_2.Op != Op386ADDLconst { break } off2 := v_2.AuxInt idx := v_2.Args[0] + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ 
-2330,16 +2307,15 @@ func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -2355,6 +2331,9 @@ func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) @@ -2363,14 +2342,13 @@ func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -2388,15 +2366,14 @@ func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -2411,6 +2388,10 @@ func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { return false } func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ANDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) @@ -2419,15 +2400,14 @@ func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -2446,15 +2426,14 @@ func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -2473,16 +2452,15 @@ func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -2501,14 +2479,13 @@ func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != Op386MOVLconst { break } c := v_2.AuxInt + mem := v_3 if !(validValAndOff(c, off)) { break } @@ -2523,13 +2500,13 @@ func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386CMPB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPB x (MOVLconst [c])) // result: (CMPBconst x [int64(int8(c))]) for { - _ = 
v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -2542,12 +2519,11 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { // match: (CMPB (MOVLconst [c]) x) // result: (InvertFlags (CMPBconst x [int64(int8(c))])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } c := v_0.AuxInt + x := v_1 v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) v0.AuxInt = int64(int8(c)) @@ -2559,8 +2535,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPBload {sym} [off] ptr x mem) for { - x := v.Args[1] - l := v.Args[0] + l := v_0 if l.Op != Op386MOVBload { break } @@ -2568,6 +2543,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] + x := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } @@ -2583,9 +2559,8 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVBload { break } @@ -2609,13 +2584,13 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { return false } func rewriteValue386_Op386CMPBconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)==int8(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2631,7 +2606,6 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2647,7 +2621,6 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2663,7 +2636,6 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2679,7 +2651,6 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2695,7 +2666,6 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -2713,7 +2683,7 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { if v.AuxInt != 0 { break } - l := v.Args[0] + l := v_0 if l.Op != Op386ANDL { break } @@ -2734,7 +2704,7 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { if v.AuxInt != 0 { break } - l := v.Args[0] + l := v_0 if l.Op != Op386ANDLconst { break } @@ -2754,7 +2724,7 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(Op386TESTB) v.AddArg(x) v.AddArg(x) @@ -2765,7 +2735,7 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { // result: @l.Block (CMPBconstload {sym} [makeValAndOff(c,off)] ptr mem) for { c := v.AuxInt - l := v.Args[0] + l := v_0 if l.Op != Op386MOVBload { break } @@ -2789,19 +2759,21 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool { return false } func rewriteValue386_Op386CMPBload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) // cond: validValAndOff(int64(int8(c)),off) // result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem) for { off := 
v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validValAndOff(int64(int8(c)), off)) { break } @@ -2815,13 +2787,13 @@ func rewriteValue386_Op386CMPBload_0(v *Value) bool { return false } func rewriteValue386_Op386CMPL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPL x (MOVLconst [c])) // result: (CMPLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -2834,12 +2806,11 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { // match: (CMPL (MOVLconst [c]) x) // result: (InvertFlags (CMPLconst x [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } c := v_0.AuxInt + x := v_1 v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) v0.AuxInt = c @@ -2851,8 +2822,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPLload {sym} [off] ptr x mem) for { - x := v.Args[1] - l := v.Args[0] + l := v_0 if l.Op != Op386MOVLload { break } @@ -2860,6 +2830,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] + x := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } @@ -2875,9 +2846,8 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVLload { break } @@ -2901,12 +2871,12 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { return false } func rewriteValue386_Op386CMPLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPLconst (MOVLconst [x]) [y]) // cond: int32(x)==int32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2922,7 +2892,6 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2938,7 +2907,6 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2954,7 +2922,6 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2970,7 +2937,6 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -2986,7 +2952,6 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386SHRLconst { break } @@ -3002,7 +2967,6 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -3020,7 +2984,7 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { if v.AuxInt != 0 { break } - l := v.Args[0] + l := v_0 if l.Op != Op386ANDL { break } @@ -3041,7 +3005,7 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { if v.AuxInt != 0 { break } - l := v.Args[0] + l := v_0 if l.Op != Op386ANDLconst { break } @@ -3061,7 +3025,7 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(Op386TESTL) v.AddArg(x) v.AddArg(x) @@ 
-3070,13 +3034,14 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { return false } func rewriteValue386_Op386CMPLconst_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l) // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) for { c := v.AuxInt - l := v.Args[0] + l := v_0 if l.Op != Op386MOVLload { break } @@ -3100,19 +3065,21 @@ func rewriteValue386_Op386CMPLconst_10(v *Value) bool { return false } func rewriteValue386_Op386CMPLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) // cond: validValAndOff(int64(int32(c)),off) // result: (CMPLconstload {sym} [makeValAndOff(int64(int32(c)),off)] ptr mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validValAndOff(int64(int32(c)), off)) { break } @@ -3126,13 +3093,13 @@ func rewriteValue386_Op386CMPLload_0(v *Value) bool { return false } func rewriteValue386_Op386CMPW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVLconst [c])) // result: (CMPWconst x [int64(int16(c))]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -3145,12 +3112,11 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { // match: (CMPW (MOVLconst [c]) x) // result: (InvertFlags (CMPWconst x [int64(int16(c))])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } c := v_0.AuxInt + x := v_1 v.reset(Op386InvertFlags) v0 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) v0.AuxInt = int64(int16(c)) @@ -3162,8 +3128,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPWload {sym} [off] ptr x mem) for { - x := v.Args[1] - l := v.Args[0] + l := v_0 if l.Op != Op386MOVWload { break } @@ -3171,6 +3136,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] + x := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } @@ -3186,9 +3152,8 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPWload {sym} [off] ptr x mem)) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVWload { break } @@ -3212,13 +3177,13 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { return false } func rewriteValue386_Op386CMPWconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)==int16(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -3234,7 +3199,6 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -3250,7 +3214,6 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -3266,7 +3229,6 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -3282,7 +3244,6 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] 
if v_0.Op != Op386MOVLconst { break } @@ -3298,7 +3259,6 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -3316,7 +3276,7 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { if v.AuxInt != 0 { break } - l := v.Args[0] + l := v_0 if l.Op != Op386ANDL { break } @@ -3337,7 +3297,7 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { if v.AuxInt != 0 { break } - l := v.Args[0] + l := v_0 if l.Op != Op386ANDLconst { break } @@ -3357,7 +3317,7 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(Op386TESTW) v.AddArg(x) v.AddArg(x) @@ -3368,7 +3328,7 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { // result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem) for { c := v.AuxInt - l := v.Args[0] + l := v_0 if l.Op != Op386MOVWload { break } @@ -3392,19 +3352,21 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { return false } func rewriteValue386_Op386CMPWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) // cond: validValAndOff(int64(int16(c)),off) // result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validValAndOff(int64(int16(c)), off)) { break } @@ -3418,15 +3380,16 @@ func rewriteValue386_Op386CMPWload_0(v *Value) bool { return false } func rewriteValue386_Op386DIVSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (DIVSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVSDload { break } @@ -3448,6 +3411,9 @@ func rewriteValue386_Op386DIVSD_0(v *Value) bool { return false } func rewriteValue386_Op386DIVSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -3456,14 +3422,13 @@ func rewriteValue386_Op386DIVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3481,15 +3446,14 @@ func rewriteValue386_Op386DIVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -3504,15 +3468,16 @@ func rewriteValue386_Op386DIVSDload_0(v *Value) bool { return false } func rewriteValue386_Op386DIVSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (DIVSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if 
l.Op != Op386MOVSSload { break } @@ -3534,6 +3499,9 @@ func rewriteValue386_Op386DIVSS_0(v *Value) bool { return false } func rewriteValue386_Op386DIVSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -3542,14 +3510,13 @@ func rewriteValue386_Op386DIVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3567,15 +3534,14 @@ func rewriteValue386_Op386DIVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -3590,13 +3556,13 @@ func rewriteValue386_Op386DIVSSload_0(v *Value) bool { return false } func rewriteValue386_Op386LEAL_0(v *Value) bool { + v_0 := v.Args[0] // match: (LEAL [c] {s} (ADDLconst [d] x)) // cond: is32Bit(c+d) // result: (LEAL [c+d] {s} x) for { c := v.AuxInt s := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } @@ -3617,14 +3583,15 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - y := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if !(x.Op != OpSB && y.Op != OpSB) { continue } @@ -3643,7 +3610,6 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } @@ -3665,7 +3631,6 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -3689,7 +3654,6 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386LEAL2 { break } @@ -3713,7 +3677,6 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -3737,7 +3700,6 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - v_0 := v.Args[0] if v_0.Op != Op386LEAL8 { break } @@ -3758,21 +3720,21 @@ func rewriteValue386_Op386LEAL_0(v *Value) bool { return false } func rewriteValue386_Op386LEAL1_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAL1 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 if !(is32Bit(c+d) && x.Op != OpSB) { continue } @@ -3790,10 +3752,8 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386SHLLconst || 
v_1.AuxInt != 1 { continue } @@ -3812,10 +3772,8 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { continue } @@ -3834,10 +3792,8 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 { continue } @@ -3857,16 +3813,14 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386LEAL { continue } off2 := v_0.AuxInt sym2 := v_0.Aux x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { continue } @@ -3882,19 +3836,20 @@ func rewriteValue386_Op386LEAL1_0(v *Value) bool { return false } func rewriteValue386_Op386LEAL2_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAL2 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt x := v_0.Args[0] + y := v_1 if !(is32Bit(c+d) && x.Op != OpSB) { break } @@ -3911,9 +3866,7 @@ func rewriteValue386_Op386LEAL2_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386ADDLconst { break } @@ -3934,9 +3887,7 @@ func rewriteValue386_Op386LEAL2_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { break } @@ -3953,9 +3904,7 @@ func rewriteValue386_Op386LEAL2_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { break } @@ -3973,14 +3922,13 @@ func rewriteValue386_Op386LEAL2_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux x := v_0.Args[0] + y := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } @@ -3994,19 +3942,20 @@ func rewriteValue386_Op386LEAL2_0(v *Value) bool { return false } func rewriteValue386_Op386LEAL4_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAL4 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt x := v_0.Args[0] + y := v_1 if !(is32Bit(c+d) && x.Op != OpSB) { break } @@ -4023,9 +3972,7 @@ func rewriteValue386_Op386LEAL4_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386ADDLconst { break } @@ -4046,9 +3993,7 @@ func rewriteValue386_Op386LEAL4_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { break } @@ -4066,14 +4011,13 @@ func 
rewriteValue386_Op386LEAL4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux x := v_0.Args[0] + y := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } @@ -4087,19 +4031,20 @@ func rewriteValue386_Op386LEAL4_0(v *Value) bool { return false } func rewriteValue386_Op386LEAL8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) // cond: is32Bit(c+d) && x.Op != OpSB // result: (LEAL8 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt x := v_0.Args[0] + y := v_1 if !(is32Bit(c+d) && x.Op != OpSB) { break } @@ -4116,9 +4061,7 @@ func rewriteValue386_Op386LEAL8_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386ADDLconst { break } @@ -4140,14 +4083,13 @@ func rewriteValue386_Op386LEAL8_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux x := v_0.Args[0] + y := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } @@ -4161,12 +4103,13 @@ func rewriteValue386_Op386LEAL8_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBLSX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBLSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVBload { break } @@ -4191,7 +4134,6 @@ func rewriteValue386_Op386MOVBLSX_0(v *Value) bool { // cond: c & 0x80 == 0 // result: (ANDLconst [c & 0x7f] x) for { - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -4208,6 +4150,8 @@ func rewriteValue386_Op386MOVBLSX_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBLSXload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -4216,9 +4160,7 @@ func rewriteValue386_Op386MOVBLSXload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVBstore { break } @@ -4240,14 +4182,13 @@ func rewriteValue386_Op386MOVBLSXload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4261,12 +4202,13 @@ func rewriteValue386_Op386MOVBLSXload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBLZX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVBload { break } @@ -4291,7 +4233,7 @@ func rewriteValue386_Op386MOVBLZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVBloadidx1 { break } @@ -4317,7 +4259,6 @@ func rewriteValue386_Op386MOVBLZX_0(v *Value) bool { // match: (MOVBLZX (ANDLconst [c] x)) // result: (ANDLconst [c & 0xff] 
x) for { - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -4331,6 +4272,8 @@ func rewriteValue386_Op386MOVBLZX_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) @@ -4339,9 +4282,7 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVBstore { break } @@ -4363,13 +4304,12 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4386,14 +4326,13 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4410,8 +4349,6 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -4419,6 +4356,7 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -4436,15 +4374,16 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -4464,8 +4403,6 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -4476,20 +4413,22 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -4505,15 +4444,14 @@ func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386ADDLconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -4527,6 +4465,9 @@ func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBstore_0(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem) @@ -4534,13 +4475,12 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVBLSX { break } x := v_1.Args[0] + mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = off v.Aux = sym @@ -4554,13 +4494,12 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVBLZX { break } x := v_1.Args[0] + mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = off v.Aux = sym @@ -4575,14 +4514,13 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -4600,13 +4538,12 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -4623,15 +4560,14 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4649,8 +4585,6 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -4658,7 +4592,8 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -4677,16 +4612,17 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -4707,14 +4643,12 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != Op386SHRWconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != Op386MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -4736,14 +4670,12 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != Op386SHRLconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != Op386MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -4765,10 +4697,9 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p 
:= v_0 + w := v_1 + x := v_2 if x.Op != Op386MOVBstore || x.AuxInt != i+1 || x.Aux != s { break } @@ -4791,16 +4722,18 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != Op386MOVBstore || x.AuxInt != i+1 || x.Aux != s { break } @@ -4826,15 +4759,13 @@ func rewriteValue386_Op386MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != Op386SHRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != Op386MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -4857,6 +4788,8 @@ func rewriteValue386_Op386MOVBstore_10(v *Value) bool { return false } func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -4865,13 +4798,12 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -4888,14 +4820,13 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4912,8 +4843,6 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -4921,6 +4850,7 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -4937,13 +4867,12 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(Op386MOVBstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -4958,9 +4887,8 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != Op386MOVBstoreconst { break } @@ -4985,9 +4913,8 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { for { a := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != Op386MOVBstoreconst { break } @@ -5009,19 +4936,21 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } c := 
v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -5035,14 +4964,13 @@ func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -5057,10 +4985,9 @@ func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] + p := v_0 + i := v_1 + x := v_2 if x.Op != Op386MOVBstoreconstidx1 { break } @@ -5083,21 +5010,24 @@ func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -5114,16 +5044,15 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386ADDLconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -5141,22 +5070,22 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(Op386MOVWstoreidx1) @@ -5177,22 +5106,22 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for 
_i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(Op386MOVWstoreidx1) @@ -5213,18 +5142,19 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -5249,18 +5179,19 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -5285,23 +5216,23 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != Op386SHRLconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -5323,6 +5254,8 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) @@ -5331,9 +5264,7 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLstore { break } @@ -5356,13 +5287,12 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -5379,14 +5309,13 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -5403,8 +5332,6 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := 
v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -5412,6 +5339,7 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -5429,8 +5357,6 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -5438,6 +5364,7 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -5455,15 +5382,16 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -5483,8 +5411,6 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -5495,19 +5421,21 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) // result: (MOVLloadidx4 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVLloadidx4) v.AuxInt = c v.Aux = sym @@ -5523,15 +5451,14 @@ func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 v.reset(Op386MOVLloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -5547,15 +5474,14 @@ func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386ADDLconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVLloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -5569,19 +5495,21 @@ func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLloadidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVLloadidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -5595,14 +5523,13 @@ func rewriteValue386_Op386MOVLloadidx4_0(v *Value) 
bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVLloadidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym @@ -5614,6 +5541,9 @@ func rewriteValue386_Op386MOVLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -5622,14 +5552,13 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -5647,13 +5576,12 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -5670,15 +5598,14 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -5696,8 +5623,6 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -5705,7 +5630,8 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -5724,8 +5650,6 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -5733,7 +5657,8 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -5752,16 +5677,17 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -5782,15 +5708,14 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ADDLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386ADDLmodify) @@ -5807,15 +5732,14 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr 
:= v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ANDLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386ANDLmodify) @@ -5832,15 +5756,14 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ORLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386ORLmodify) @@ -5857,15 +5780,14 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386XORLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386XORLmodify) @@ -5879,30 +5801,34 @@ func rewriteValue386_Op386MOVLstore_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (ADDLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ADDL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386ADDLmodify) @@ -5921,9 +5847,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386SUBL { break } @@ -5932,8 +5857,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(Op386SUBLmodify) @@ -5950,24 +5875,25 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ANDL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != 
sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386ANDLmodify) @@ -5986,24 +5912,25 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ORL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386ORLmodify) @@ -6022,24 +5949,25 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386XORL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386XORLmodify) @@ -6058,9 +5986,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ADDLconst { break } @@ -6069,8 +5996,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386ADDLconstmodify) @@ -6086,9 +6013,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386ANDLconst { break } @@ -6097,8 +6023,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386ANDLconstmodify) @@ -6114,9 +6040,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != 
Op386ORLconst { break } @@ -6125,8 +6050,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386ORLconstmodify) @@ -6142,9 +6067,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != Op386XORLconst { break } @@ -6153,8 +6077,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386XORLconstmodify) @@ -6167,6 +6091,8 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { return false } func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -6175,13 +6101,12 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -6198,14 +6123,13 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -6222,8 +6146,6 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -6231,6 +6153,7 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -6248,8 +6171,6 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -6257,6 +6178,7 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -6273,13 +6195,12 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(Op386MOVLstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -6291,18 +6212,20 @@ func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) for { c 
:= v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym @@ -6316,14 +6239,13 @@ func rewriteValue386_Op386MOVLstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -6337,14 +6259,13 @@ func rewriteValue386_Op386MOVLstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -6356,19 +6277,21 @@ func rewriteValue386_Op386MOVLstoreconstidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstoreconstidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -6382,14 +6305,13 @@ func rewriteValue386_Op386MOVLstoreconstidx4_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym @@ -6401,20 +6323,23 @@ func rewriteValue386_Op386MOVLstoreconstidx4_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 { continue } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVLstoreidx4) v.AuxInt = c v.Aux = sym @@ -6431,16 +6356,15 @@ func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVLstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -6457,16 +6381,15 @@ func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386ADDLconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := 
v_3 v.reset(Op386MOVLstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -6481,20 +6404,23 @@ func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVLstoreidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -6509,15 +6435,14 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVLstoreidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym @@ -6533,16 +6458,15 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ADDLloadidx4 || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[3] + mem := y.Args[3] x := y.Args[0] - if ptr != y.Args[1] || idx != y.Args[2] || mem != y.Args[3] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386ADDLmodifyidx4) @@ -6560,16 +6484,15 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ANDLloadidx4 || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[3] + mem := y.Args[3] x := y.Args[0] - if ptr != y.Args[1] || idx != y.Args[2] || mem != y.Args[3] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386ANDLmodifyidx4) @@ -6587,16 +6510,15 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ORLloadidx4 || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[3] + mem := y.Args[3] x := y.Args[0] - if ptr != y.Args[1] || idx != y.Args[2] || mem != y.Args[3] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386ORLmodifyidx4) @@ -6614,16 +6536,15 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386XORLloadidx4 || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[3] + mem := y.Args[3] x := y.Args[0] - if ptr != y.Args[1] || idx != y.Args[2] || mem != y.Args[3] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) { break } v.reset(Op386XORLmodifyidx4) @@ -6641,25 +6562,26 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - 
ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ADDL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386ADDLmodifyidx4) @@ -6679,10 +6601,9 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386SUBL { break } @@ -6691,8 +6612,8 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(Op386SUBLmodifyidx4) @@ -6710,25 +6631,26 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ANDL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386ANDLmodifyidx4) @@ -6748,25 +6670,26 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ORL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386ORLmodifyidx4) @@ -6783,31 +6706,36 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { return false } func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL l:(MOVLloadidx4 [off] {sym} 
ptr idx mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386XORL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(Op386XORLmodifyidx4) @@ -6827,10 +6755,9 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ADDLconst { break } @@ -6839,8 +6766,8 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386ADDLconstmodifyidx4) @@ -6857,10 +6784,9 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ANDLconst { break } @@ -6869,8 +6795,8 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386ANDLconstmodifyidx4) @@ -6887,10 +6813,9 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != Op386ORLconst { break } @@ -6899,8 +6824,8 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386ORLconstmodifyidx4) @@ -6917,10 +6842,9 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - y := v.Args[2] + ptr := v_0 + idx := v_1 + y := v_2 if y.Op != 
Op386XORLconst { break } @@ -6929,8 +6853,8 @@ func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[2] - if ptr != l.Args[0] || idx != l.Args[1] || mem != l.Args[2] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + mem := l.Args[2] + if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { break } v.reset(Op386XORLconstmodifyidx4) @@ -6964,6 +6888,8 @@ func rewriteValue386_Op386MOVSDconst_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -6972,13 +6898,12 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -6995,14 +6920,13 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7019,8 +6943,6 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -7028,6 +6950,7 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7045,8 +6968,6 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL8 { break } @@ -7054,6 +6975,7 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7071,15 +6993,16 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -7096,19 +7019,21 @@ func rewriteValue386_Op386MOVSDload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSDloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVSDloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7122,14 +7047,13 @@ func rewriteValue386_Op386MOVSDloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst 
{ break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVSDloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7141,19 +7065,21 @@ func rewriteValue386_Op386MOVSDloadidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSDloadidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVSDloadidx8) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7167,14 +7093,13 @@ func rewriteValue386_Op386MOVSDloadidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVSDloadidx8) v.AuxInt = int64(int32(c + 8*d)) v.Aux = sym @@ -7186,6 +7111,9 @@ func rewriteValue386_Op386MOVSDloadidx8_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -7194,14 +7122,13 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -7219,15 +7146,14 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7245,8 +7171,6 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -7254,7 +7178,8 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7273,8 +7198,6 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL8 { break } @@ -7282,7 +7205,8 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7301,16 +7225,17 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -7328,20 +7253,23 @@ func rewriteValue386_Op386MOVSDstore_0(v *Value) bool { return false } 
func rewriteValue386_Op386MOVSDstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVSDstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7356,15 +7284,14 @@ func rewriteValue386_Op386MOVSDstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVSDstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7377,20 +7304,23 @@ func rewriteValue386_Op386MOVSDstoreidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSDstoreidx8_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVSDstoreidx8) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7405,15 +7335,14 @@ func rewriteValue386_Op386MOVSDstoreidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVSDstoreidx8) v.AuxInt = int64(int32(c + 8*d)) v.Aux = sym @@ -7446,6 +7375,8 @@ func rewriteValue386_Op386MOVSSconst_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSSload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) @@ -7454,13 +7385,12 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -7477,14 +7407,13 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7501,8 +7430,6 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -7510,6 +7437,7 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7527,8 +7455,6 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -7536,6 +7462,7 @@ func 
rewriteValue386_Op386MOVSSload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7553,15 +7480,16 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -7578,19 +7506,21 @@ func rewriteValue386_Op386MOVSSload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSSloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVSSloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7604,14 +7534,13 @@ func rewriteValue386_Op386MOVSSloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVSSloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7623,19 +7552,21 @@ func rewriteValue386_Op386MOVSSloadidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSSloadidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVSSloadidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7649,14 +7580,13 @@ func rewriteValue386_Op386MOVSSloadidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVSSloadidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym @@ -7668,6 +7598,9 @@ func rewriteValue386_Op386MOVSSloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) @@ -7676,14 +7609,13 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -7701,15 +7633,14 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || 
!config.ctxt.Flag_shared)) { break } @@ -7727,8 +7658,6 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -7736,7 +7665,8 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7755,8 +7685,6 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } @@ -7764,7 +7692,8 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -7783,16 +7712,17 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -7810,20 +7740,23 @@ func rewriteValue386_Op386MOVSSstore_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSSstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVSSstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7838,15 +7771,14 @@ func rewriteValue386_Op386MOVSSstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVSSstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7859,20 +7791,23 @@ func rewriteValue386_Op386MOVSSstoreidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVSSstoreidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVSSstoreidx4) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -7887,15 +7822,14 @@ func rewriteValue386_Op386MOVSSstoreidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVSSstoreidx4) v.AuxInt = int64(int32(c + 4*d)) v.Aux = sym @@ -7908,12 +7842,13 @@ func rewriteValue386_Op386MOVSSstoreidx4_0(v 
*Value) bool { return false } func rewriteValue386_Op386MOVWLSX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWLSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVWload { break } @@ -7938,7 +7873,6 @@ func rewriteValue386_Op386MOVWLSX_0(v *Value) bool { // cond: c & 0x8000 == 0 // result: (ANDLconst [c & 0x7fff] x) for { - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -7955,6 +7889,8 @@ func rewriteValue386_Op386MOVWLSX_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWLSXload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -7963,9 +7899,7 @@ func rewriteValue386_Op386MOVWLSXload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVWstore { break } @@ -7987,14 +7921,13 @@ func rewriteValue386_Op386MOVWLSXload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -8008,12 +7941,13 @@ func rewriteValue386_Op386MOVWLSXload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWLZX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVWload { break } @@ -8038,7 +7972,7 @@ func rewriteValue386_Op386MOVWLZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVWloadidx1 { break } @@ -8065,7 +7999,7 @@ func rewriteValue386_Op386MOVWLZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != Op386MOVWloadidx2 { break } @@ -8091,7 +8025,6 @@ func rewriteValue386_Op386MOVWLZX_0(v *Value) bool { // match: (MOVWLZX (ANDLconst [c] x)) // result: (ANDLconst [c & 0xffff] x) for { - v_0 := v.Args[0] if v_0.Op != Op386ANDLconst { break } @@ -8105,6 +8038,8 @@ func rewriteValue386_Op386MOVWLZX_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -8113,9 +8048,7 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVWstore { break } @@ -8137,13 +8070,12 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -8160,14 +8092,13 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -8184,8 +8115,6 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -8193,6 +8122,7 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -8210,8 +8140,6 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL2 { break } @@ -8219,6 +8147,7 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -8236,15 +8165,16 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -8264,8 +8194,6 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -8276,19 +8204,21 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) // result: (MOVWloadidx2 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWloadidx2) v.AuxInt = c v.Aux = sym @@ -8304,15 +8234,14 @@ func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 v.reset(Op386MOVWloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -8328,15 +8257,14 @@ func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386ADDLconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -8350,19 +8278,21 @@ func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWloadidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) // result: (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break 
} d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVWloadidx2) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -8376,14 +8306,13 @@ func rewriteValue386_Op386MOVWloadidx2_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWloadidx2) v.AuxInt = int64(int32(c + 2*d)) v.Aux = sym @@ -8395,6 +8324,9 @@ func rewriteValue386_Op386MOVWloadidx2_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem) @@ -8402,13 +8334,12 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVWLSX { break } x := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWstore) v.AuxInt = off v.Aux = sym @@ -8422,13 +8353,12 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVWLZX { break } x := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWstore) v.AuxInt = off v.Aux = sym @@ -8443,14 +8373,13 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -8468,13 +8397,12 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -8491,15 +8419,14 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -8517,8 +8444,6 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -8526,7 +8451,8 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -8545,8 +8471,6 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL2 { break } @@ -8554,7 +8478,8 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -8573,16 +8498,17 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := 
v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -8603,14 +8529,12 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != Op386SHRLconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != Op386MOVWstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -8632,15 +8556,13 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != Op386SHRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != Op386MOVWstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -8663,6 +8585,8 @@ func rewriteValue386_Op386MOVWstore_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) @@ -8671,13 +8595,12 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -8694,14 +8617,13 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -8718,8 +8640,6 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } @@ -8727,6 +8647,7 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -8744,8 +8665,6 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL2 { break } @@ -8753,6 +8672,7 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -8769,13 +8689,12 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDL { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(Op386MOVWstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -8790,9 +8709,8 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != Op386MOVWstoreconst { break } @@ -8817,9 +8735,8 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { for { a := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != Op386MOVWstoreconst { break } @@ -8841,18 +8758,20 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // 
match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWstoreconstidx2) v.AuxInt = c v.Aux = sym @@ -8866,14 +8785,13 @@ func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -8887,14 +8805,13 @@ func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -8909,10 +8826,9 @@ func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] + p := v_0 + i := v_1 + x := v_2 if x.Op != Op386MOVWstoreconstidx1 { break } @@ -8935,20 +8851,22 @@ func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWstoreconstidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -8962,14 +8880,13 @@ func rewriteValue386_Op386MOVWstoreconstidx2_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(Op386MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym @@ -8984,10 +8901,9 @@ func rewriteValue386_Op386MOVWstoreconstidx2_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] + p := v_0 + i := v_1 + x := v_2 if x.Op != Op386MOVWstoreconstidx2 { break } @@ -9013,20 +8929,23 @@ func rewriteValue386_Op386MOVWstoreconstidx2_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 { continue } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVWstoreidx2) v.AuxInt = c v.Aux = sym @@ -9043,16 +8962,15 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386ADDLconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVWstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -9069,16 +8987,15 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != Op386ADDLconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVWstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -9096,22 +9013,22 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(Op386MOVLstoreidx1) @@ -9132,23 +9049,23 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != Op386SHRLconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -9170,21 +9087,24 @@ func rewriteValue386_Op386MOVWstoreidx1_0(v *Value) bool { return false } func rewriteValue386_Op386MOVWstoreidx2_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) // result: (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(Op386MOVWstoreidx2) v.AuxInt = int64(int32(c + d)) v.Aux = sym @@ -9199,15 +9119,14 @@ func rewriteValue386_Op386MOVWstoreidx2_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(Op386MOVWstoreidx2) v.AuxInt = int64(int32(c + 2*d)) v.Aux = sym @@ -9223,15 +9142,13 @@ func rewriteValue386_Op386MOVWstoreidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := 
v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s { break } @@ -9257,16 +9174,14 @@ func rewriteValue386_Op386MOVWstoreidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != Op386SHRLconst { break } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != Op386MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s { break } @@ -9293,13 +9208,13 @@ func rewriteValue386_Op386MOVWstoreidx2_0(v *Value) bool { return false } func rewriteValue386_Op386MULL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULL x (MOVLconst [c])) // result: (MULLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386MOVLconst { continue } @@ -9315,10 +9230,9 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLload { continue } @@ -9343,10 +9257,9 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLloadidx4 x [off] {sym} ptr idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLloadidx4 { continue } @@ -9372,12 +9285,12 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { return false } func rewriteValue386_Op386MULLconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLconst [c] (MULLconst [d] x)) // result: (MULLconst [int64(int32(c * d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MULLconst { break } @@ -9394,7 +9307,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != -9 { break } - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) v0.AddArg(x) @@ -9408,7 +9321,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != -5 { break } - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) v0.AddArg(x) @@ -9422,7 +9335,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != -3 { break } - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) v0.AddArg(x) @@ -9436,7 +9349,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v.AddArg(x) return true @@ -9457,7 +9370,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != 1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -9469,7 +9382,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != 3 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL2) v.AddArg(x) v.AddArg(x) @@ -9481,7 +9394,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != 5 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL4) v.AddArg(x) v.AddArg(x) @@ -9493,7 +9406,7 @@ 
func rewriteValue386_Op386MULLconst_0(v *Value) bool { if v.AuxInt != 7 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL2) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) @@ -9505,6 +9418,7 @@ func rewriteValue386_Op386MULLconst_0(v *Value) bool { return false } func rewriteValue386_Op386MULLconst_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLconst [9] x) // result: (LEAL8 x x) @@ -9512,7 +9426,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 9 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v.AddArg(x) v.AddArg(x) @@ -9524,7 +9438,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 11 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL2) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) @@ -9539,7 +9453,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 13 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL4) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) @@ -9554,7 +9468,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 19 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL2) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) @@ -9569,7 +9483,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 21 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL4) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) @@ -9584,7 +9498,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 25 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) @@ -9599,7 +9513,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 27 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) v0.AddArg(x) @@ -9617,7 +9531,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 37 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL4) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) @@ -9632,7 +9546,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 41 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) @@ -9647,7 +9561,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { if v.AuxInt != 45 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) v0.AddArg(x) @@ -9662,6 +9576,7 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { return false } func rewriteValue386_Op386MULLconst_20(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLconst [73] x) // result: (LEAL8 x (LEAL8 x x)) @@ -9669,7 +9584,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { if v.AuxInt != 73 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) @@ -9684,7 +9599,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { if v.AuxInt != 81 { break } - x := v.Args[0] + x := v_0 v.reset(Op386LEAL8) v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) v0.AddArg(x) @@ -9701,7 +9616,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (SUBL (SHLLconst [log2(c+1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c+1) && c >= 15) { break } @@ -9718,7 +9633,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (LEAL1 (SHLLconst [log2(c-1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if 
!(isPowerOfTwo(c-1) && c >= 17) { break } @@ -9735,7 +9650,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (LEAL2 (SHLLconst [log2(c-2)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-2) && c >= 34) { break } @@ -9752,7 +9667,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (LEAL4 (SHLLconst [log2(c-4)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-4) && c >= 68) { break } @@ -9769,7 +9684,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (LEAL8 (SHLLconst [log2(c-8)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-8) && c >= 136) { break } @@ -9786,7 +9701,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (SHLLconst [log2(c/3)] (LEAL2 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } @@ -9803,7 +9718,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (SHLLconst [log2(c/5)] (LEAL4 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } @@ -9820,7 +9735,7 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { // result: (SHLLconst [log2(c/9)] (LEAL8 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } @@ -9835,11 +9750,11 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { return false } func rewriteValue386_Op386MULLconst_30(v *Value) bool { + v_0 := v.Args[0] // match: (MULLconst [c] (MOVLconst [d])) // result: (MOVLconst [int64(int32(c*d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -9851,6 +9766,9 @@ func rewriteValue386_Op386MULLconst_30(v *Value) bool { return false } func rewriteValue386_Op386MULLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -9859,14 +9777,13 @@ func rewriteValue386_Op386MULLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -9884,15 +9801,14 @@ func rewriteValue386_Op386MULLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -9910,9 +9826,7 @@ func rewriteValue386_Op386MULLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL4 { break } @@ -9920,6 +9834,7 @@ func rewriteValue386_Op386MULLload_0(v *Value) bool { sym2 := v_1.Aux idx := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9935,6 +9850,10 @@ func rewriteValue386_Op386MULLload_0(v *Value) bool { return false } func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MULLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) @@ -9943,15 +9862,14 @@ func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool { for { off1 
:= v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -9970,15 +9888,14 @@ func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - base := v.Args[1] - v_2 := v.Args[2] + val := v_0 + base := v_1 if v_2.Op != Op386ADDLconst { break } off2 := v_2.AuxInt idx := v_2.Args[0] + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -9997,16 +9914,15 @@ func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10022,16 +9938,17 @@ func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386MULSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVSDload { continue } @@ -10055,6 +9972,9 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { return false } func rewriteValue386_Op386MULSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -10063,14 +9983,13 @@ func rewriteValue386_Op386MULSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -10088,15 +10007,14 @@ func rewriteValue386_Op386MULSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10111,16 +10029,17 @@ func rewriteValue386_Op386MULSDload_0(v *Value) bool { return false } func rewriteValue386_Op386MULSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVSSload { continue } @@ -10144,6 +10063,9 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { return false } func rewriteValue386_Op386MULSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MULSSload [off1] {sym} val 
(ADDLconst [off2] base) mem) @@ -10152,14 +10074,13 @@ func rewriteValue386_Op386MULSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -10177,15 +10098,14 @@ func rewriteValue386_Op386MULSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10200,10 +10120,10 @@ func rewriteValue386_Op386MULSSload_0(v *Value) bool { return false } func rewriteValue386_Op386NEGL_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGL (MOVLconst [c])) // result: (MOVLconst [int64(int32(-c))]) for { - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -10215,10 +10135,10 @@ func rewriteValue386_Op386NEGL_0(v *Value) bool { return false } func rewriteValue386_Op386NOTL_0(v *Value) bool { + v_0 := v.Args[0] // match: (NOTL (MOVLconst [c])) // result: (MOVLconst [^c]) for { - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -10230,15 +10150,15 @@ func rewriteValue386_Op386NOTL_0(v *Value) bool { return false } func rewriteValue386_Op386ORL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORL x (MOVLconst [c])) // result: (ORLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386MOVLconst { continue } @@ -10254,15 +10174,12 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // cond: d == 32-c // result: (ROLLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRLconst { continue } @@ -10282,15 +10199,12 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // result: (ROLWconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRWconst { continue } @@ -10310,15 +10224,12 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // result: (ROLBconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRBconst { continue } @@ -10337,10 +10248,9 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLload { continue } @@ -10365,10 +10275,9 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLloadidx4 x [off] {sym} ptr 
idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLloadidx4 { continue } @@ -10394,8 +10303,8 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // match: (ORL x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -10407,9 +10316,8 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != Op386MOVBload { continue } @@ -10417,7 +10325,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - s0 := v.Args[1^_i0] + s0 := v_1 if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { continue } @@ -10449,15 +10357,16 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != Op386ORL { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + x0 := o0_0 if x0.Op != Op386MOVWload { continue } @@ -10465,7 +10374,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - s0 := o0.Args[1^_i1] + s0 := o0_1 if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { continue } @@ -10481,7 +10390,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - s1 := v.Args[1^_i0] + s1 := v_1 if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { continue } @@ -10514,19 +10423,20 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != Op386MOVBloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - s0 := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + s0 := v_1 if s0.Op != Op386SHLLconst || s0.AuxInt != 8 { continue } @@ -10539,8 +10449,10 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 
:= 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { continue } b = mergePoint(b, x0, x1) @@ -10561,30 +10473,35 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { return false } func rewriteValue386_Op386ORL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != Op386ORL { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + x0 := o0_0 if x0.Op != Op386MOVWloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - p := x0.Args[_i2] - idx := x0.Args[1^_i2] - s0 := o0.Args[1^_i1] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + s0 := o0_1 if s0.Op != Op386SHLLconst || s0.AuxInt != 16 { continue } @@ -10597,11 +10514,13 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - s1 := v.Args[1^_i0] + s1 := v_1 if s1.Op != Op386SHLLconst || s1.AuxInt != 24 { continue } @@ -10614,8 +10533,10 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { continue } _ = x2.Args[2] - for _i4 := 0; _i4 <= 1; _i4++ { - if p != x2.Args[_i4] || idx != x2.Args[1^_i4] || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + x2_0 := x2.Args[0] + x2_1 := x2.Args[1] + for _i4 := 0; _i4 <= 1; _i4, x2_0, x2_1 = _i4+1, x2_1, x2_0 { + if p != x2_0 || idx != x2_1 || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { continue } b = mergePoint(b, x0, x1, x2) @@ -10638,12 +10559,13 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { return false } func rewriteValue386_Op386ORLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORLconst [c] x) // cond: int32(c)==0 // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -10668,7 +10590,6 @@ func rewriteValue386_Op386ORLconst_0(v *Value) bool { // result: (MOVLconst [c|d]) for { c := v.AuxInt - 
v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -10680,6 +10601,8 @@ func rewriteValue386_Op386ORLconst_0(v *Value) bool { return false } func rewriteValue386_Op386ORLconstmodify_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) @@ -10688,13 +10611,12 @@ func rewriteValue386_Op386ORLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -10711,14 +10633,13 @@ func rewriteValue386_Op386ORLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10732,6 +10653,9 @@ func rewriteValue386_Op386ORLconstmodify_0(v *Value) bool { return false } func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) @@ -10740,14 +10664,13 @@ func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -10765,14 +10688,13 @@ func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2 * 4)) { break } @@ -10790,15 +10712,14 @@ func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10813,6 +10734,9 @@ func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ORLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -10821,14 +10745,13 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -10846,15 +10769,14 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10872,9 +10794,7 @@ func 
rewriteValue386_Op386ORLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL4 { break } @@ -10882,6 +10802,7 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool { sym2 := v_1.Aux idx := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10897,6 +10818,10 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool { return false } func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) @@ -10905,15 +10830,14 @@ func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -10932,15 +10856,14 @@ func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - base := v.Args[1] - v_2 := v.Args[2] + val := v_0 + base := v_1 if v_2.Op != Op386ADDLconst { break } off2 := v_2.AuxInt idx := v_2.Args[0] + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -10959,16 +10882,15 @@ func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10984,6 +10906,9 @@ func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ORLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem) @@ -10992,14 +10917,13 @@ func rewriteValue386_Op386ORLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -11017,15 +10941,14 @@ func rewriteValue386_Op386ORLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -11040,6 +10963,10 @@ func rewriteValue386_Op386ORLmodify_0(v *Value) bool { return false } func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (ORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) @@ -11048,15 +10975,14 @@ func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if 
!(is32Bit(off1 + off2)) { break } @@ -11075,15 +11001,14 @@ func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -11102,16 +11027,15 @@ func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -11130,14 +11054,13 @@ func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != Op386MOVLconst { break } c := v_2.AuxInt + mem := v_3 if !(validValAndOff(c, off)) { break } @@ -11152,11 +11075,11 @@ func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386ROLBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ROLBconst [c] (ROLBconst [d] x)) // result: (ROLBconst [(c+d)& 7] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ROLBconst { break } @@ -11173,7 +11096,7 @@ func rewriteValue386_Op386ROLBconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -11182,11 +11105,11 @@ func rewriteValue386_Op386ROLBconst_0(v *Value) bool { return false } func rewriteValue386_Op386ROLLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ROLLconst [c] (ROLLconst [d] x)) // result: (ROLLconst [(c+d)&31] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ROLLconst { break } @@ -11203,7 +11126,7 @@ func rewriteValue386_Op386ROLLconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -11212,11 +11135,11 @@ func rewriteValue386_Op386ROLLconst_0(v *Value) bool { return false } func rewriteValue386_Op386ROLWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ROLWconst [c] (ROLWconst [d] x)) // result: (ROLWconst [(c+d)&15] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386ROLWconst { break } @@ -11233,7 +11156,7 @@ func rewriteValue386_Op386ROLWconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -11242,12 +11165,12 @@ func rewriteValue386_Op386ROLWconst_0(v *Value) bool { return false } func rewriteValue386_Op386SARB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SARB x (MOVLconst [c])) // result: (SARBconst [min(c&31,7)] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -11260,13 +11183,14 @@ func rewriteValue386_Op386SARB_0(v *Value) bool { return false } func rewriteValue386_Op386SARBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SARBconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -11276,7 +11200,6 @@ func rewriteValue386_Op386SARBconst_0(v *Value) bool { // result: (MOVLconst [d>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -11288,12 
+11211,12 @@ func rewriteValue386_Op386SARBconst_0(v *Value) bool { return false } func rewriteValue386_Op386SARL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SARL x (MOVLconst [c])) // result: (SARLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -11306,9 +11229,7 @@ func rewriteValue386_Op386SARL_0(v *Value) bool { // match: (SARL x (ANDLconst [31] y)) // result: (SARL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386ANDLconst || v_1.AuxInt != 31 { break } @@ -11321,13 +11242,14 @@ func rewriteValue386_Op386SARL_0(v *Value) bool { return false } func rewriteValue386_Op386SARLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SARLconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -11337,7 +11259,6 @@ func rewriteValue386_Op386SARLconst_0(v *Value) bool { // result: (MOVLconst [d>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -11349,12 +11270,12 @@ func rewriteValue386_Op386SARLconst_0(v *Value) bool { return false } func rewriteValue386_Op386SARW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SARW x (MOVLconst [c])) // result: (SARWconst [min(c&31,15)] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -11367,13 +11288,14 @@ func rewriteValue386_Op386SARW_0(v *Value) bool { return false } func rewriteValue386_Op386SARWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SARWconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -11383,7 +11305,6 @@ func rewriteValue386_Op386SARWconst_0(v *Value) bool { // result: (MOVLconst [d>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -11395,16 +11316,18 @@ func rewriteValue386_Op386SARWconst_0(v *Value) bool { return false } func rewriteValue386_Op386SBBL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SBBL x (MOVLconst [c]) f) // result: (SBBLconst [c] x f) for { - f := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } c := v_1.AuxInt + f := v_2 v.reset(Op386SBBLconst) v.AuxInt = c v.AddArg(x) @@ -11414,10 +11337,10 @@ func rewriteValue386_Op386SBBL_0(v *Value) bool { return false } func rewriteValue386_Op386SBBLcarrymask_0(v *Value) bool { + v_0 := v.Args[0] // match: (SBBLcarrymask (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11428,7 +11351,6 @@ func rewriteValue386_Op386SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagLT_ULT)) // result: (MOVLconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11439,7 +11361,6 @@ func rewriteValue386_Op386SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11450,7 +11371,6 @@ func rewriteValue386_Op386SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagGT_ULT)) // result: (MOVLconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11461,7 +11381,6 @@ func rewriteValue386_Op386SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op 
!= Op386FlagGT_UGT { break } @@ -11472,10 +11391,10 @@ func rewriteValue386_Op386SBBLcarrymask_0(v *Value) bool { return false } func rewriteValue386_Op386SETA_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETA (InvertFlags x)) // result: (SETB x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11487,7 +11406,6 @@ func rewriteValue386_Op386SETA_0(v *Value) bool { // match: (SETA (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11498,7 +11416,6 @@ func rewriteValue386_Op386SETA_0(v *Value) bool { // match: (SETA (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11509,7 +11426,6 @@ func rewriteValue386_Op386SETA_0(v *Value) bool { // match: (SETA (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11520,7 +11436,6 @@ func rewriteValue386_Op386SETA_0(v *Value) bool { // match: (SETA (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11531,7 +11446,6 @@ func rewriteValue386_Op386SETA_0(v *Value) bool { // match: (SETA (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11542,10 +11456,10 @@ func rewriteValue386_Op386SETA_0(v *Value) bool { return false } func rewriteValue386_Op386SETAE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETAE (InvertFlags x)) // result: (SETBE x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11557,7 +11471,6 @@ func rewriteValue386_Op386SETAE_0(v *Value) bool { // match: (SETAE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11568,7 +11481,6 @@ func rewriteValue386_Op386SETAE_0(v *Value) bool { // match: (SETAE (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11579,7 +11491,6 @@ func rewriteValue386_Op386SETAE_0(v *Value) bool { // match: (SETAE (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11590,7 +11501,6 @@ func rewriteValue386_Op386SETAE_0(v *Value) bool { // match: (SETAE (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11601,7 +11511,6 @@ func rewriteValue386_Op386SETAE_0(v *Value) bool { // match: (SETAE (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11612,10 +11521,10 @@ func rewriteValue386_Op386SETAE_0(v *Value) bool { return false } func rewriteValue386_Op386SETB_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETB (InvertFlags x)) // result: (SETA x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11627,7 +11536,6 @@ func rewriteValue386_Op386SETB_0(v *Value) bool { // match: (SETB (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11638,7 +11546,6 @@ func rewriteValue386_Op386SETB_0(v *Value) bool { // match: (SETB (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11649,7 +11556,6 @@ func rewriteValue386_Op386SETB_0(v *Value) bool { // match: (SETB (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11660,7 +11566,6 @@ func rewriteValue386_Op386SETB_0(v *Value) bool { // match: (SETB (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] 
if v_0.Op != Op386FlagGT_ULT { break } @@ -11671,7 +11576,6 @@ func rewriteValue386_Op386SETB_0(v *Value) bool { // match: (SETB (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11682,10 +11586,10 @@ func rewriteValue386_Op386SETB_0(v *Value) bool { return false } func rewriteValue386_Op386SETBE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETBE (InvertFlags x)) // result: (SETAE x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11697,7 +11601,6 @@ func rewriteValue386_Op386SETBE_0(v *Value) bool { // match: (SETBE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11708,7 +11611,6 @@ func rewriteValue386_Op386SETBE_0(v *Value) bool { // match: (SETBE (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11719,7 +11621,6 @@ func rewriteValue386_Op386SETBE_0(v *Value) bool { // match: (SETBE (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11730,7 +11631,6 @@ func rewriteValue386_Op386SETBE_0(v *Value) bool { // match: (SETBE (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11741,7 +11641,6 @@ func rewriteValue386_Op386SETBE_0(v *Value) bool { // match: (SETBE (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11752,10 +11651,10 @@ func rewriteValue386_Op386SETBE_0(v *Value) bool { return false } func rewriteValue386_Op386SETEQ_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETEQ (InvertFlags x)) // result: (SETEQ x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11767,7 +11666,6 @@ func rewriteValue386_Op386SETEQ_0(v *Value) bool { // match: (SETEQ (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11778,7 +11676,6 @@ func rewriteValue386_Op386SETEQ_0(v *Value) bool { // match: (SETEQ (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11789,7 +11686,6 @@ func rewriteValue386_Op386SETEQ_0(v *Value) bool { // match: (SETEQ (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11800,7 +11696,6 @@ func rewriteValue386_Op386SETEQ_0(v *Value) bool { // match: (SETEQ (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11811,7 +11706,6 @@ func rewriteValue386_Op386SETEQ_0(v *Value) bool { // match: (SETEQ (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11822,10 +11716,10 @@ func rewriteValue386_Op386SETEQ_0(v *Value) bool { return false } func rewriteValue386_Op386SETG_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETG (InvertFlags x)) // result: (SETL x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11837,7 +11731,6 @@ func rewriteValue386_Op386SETG_0(v *Value) bool { // match: (SETG (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11848,7 +11741,6 @@ func rewriteValue386_Op386SETG_0(v *Value) bool { // match: (SETG (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11859,7 +11751,6 @@ func rewriteValue386_Op386SETG_0(v *Value) bool { // match: (SETG (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 
:= v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11870,7 +11761,6 @@ func rewriteValue386_Op386SETG_0(v *Value) bool { // match: (SETG (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11881,7 +11771,6 @@ func rewriteValue386_Op386SETG_0(v *Value) bool { // match: (SETG (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11892,10 +11781,10 @@ func rewriteValue386_Op386SETG_0(v *Value) bool { return false } func rewriteValue386_Op386SETGE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETGE (InvertFlags x)) // result: (SETLE x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11907,7 +11796,6 @@ func rewriteValue386_Op386SETGE_0(v *Value) bool { // match: (SETGE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11918,7 +11806,6 @@ func rewriteValue386_Op386SETGE_0(v *Value) bool { // match: (SETGE (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11929,7 +11816,6 @@ func rewriteValue386_Op386SETGE_0(v *Value) bool { // match: (SETGE (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -11940,7 +11826,6 @@ func rewriteValue386_Op386SETGE_0(v *Value) bool { // match: (SETGE (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -11951,7 +11836,6 @@ func rewriteValue386_Op386SETGE_0(v *Value) bool { // match: (SETGE (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -11962,10 +11846,10 @@ func rewriteValue386_Op386SETGE_0(v *Value) bool { return false } func rewriteValue386_Op386SETL_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETL (InvertFlags x)) // result: (SETG x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -11977,7 +11861,6 @@ func rewriteValue386_Op386SETL_0(v *Value) bool { // match: (SETL (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -11988,7 +11871,6 @@ func rewriteValue386_Op386SETL_0(v *Value) bool { // match: (SETL (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -11999,7 +11881,6 @@ func rewriteValue386_Op386SETL_0(v *Value) bool { // match: (SETL (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -12010,7 +11891,6 @@ func rewriteValue386_Op386SETL_0(v *Value) bool { // match: (SETL (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -12021,7 +11901,6 @@ func rewriteValue386_Op386SETL_0(v *Value) bool { // match: (SETL (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -12032,10 +11911,10 @@ func rewriteValue386_Op386SETL_0(v *Value) bool { return false } func rewriteValue386_Op386SETLE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETLE (InvertFlags x)) // result: (SETGE x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -12047,7 +11926,6 @@ func rewriteValue386_Op386SETLE_0(v *Value) bool { // match: (SETLE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -12058,7 +11936,6 @@ func rewriteValue386_Op386SETLE_0(v *Value) bool { // match: (SETLE (FlagLT_ULT)) // result: (MOVLconst [1]) for { 
- v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -12069,7 +11946,6 @@ func rewriteValue386_Op386SETLE_0(v *Value) bool { // match: (SETLE (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -12080,7 +11956,6 @@ func rewriteValue386_Op386SETLE_0(v *Value) bool { // match: (SETLE (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -12091,7 +11966,6 @@ func rewriteValue386_Op386SETLE_0(v *Value) bool { // match: (SETLE (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -12102,10 +11976,10 @@ func rewriteValue386_Op386SETLE_0(v *Value) bool { return false } func rewriteValue386_Op386SETNE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETNE (InvertFlags x)) // result: (SETNE x) for { - v_0 := v.Args[0] if v_0.Op != Op386InvertFlags { break } @@ -12117,7 +11991,6 @@ func rewriteValue386_Op386SETNE_0(v *Value) bool { // match: (SETNE (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagEQ { break } @@ -12128,7 +12001,6 @@ func rewriteValue386_Op386SETNE_0(v *Value) bool { // match: (SETNE (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_ULT { break } @@ -12139,7 +12011,6 @@ func rewriteValue386_Op386SETNE_0(v *Value) bool { // match: (SETNE (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagLT_UGT { break } @@ -12150,7 +12021,6 @@ func rewriteValue386_Op386SETNE_0(v *Value) bool { // match: (SETNE (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_ULT { break } @@ -12161,7 +12031,6 @@ func rewriteValue386_Op386SETNE_0(v *Value) bool { // match: (SETNE (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != Op386FlagGT_UGT { break } @@ -12172,12 +12041,12 @@ func rewriteValue386_Op386SETNE_0(v *Value) bool { return false } func rewriteValue386_Op386SHLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SHLL x (MOVLconst [c])) // result: (SHLLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -12190,9 +12059,7 @@ func rewriteValue386_Op386SHLL_0(v *Value) bool { // match: (SHLL x (ANDLconst [31] y)) // result: (SHLL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386ANDLconst || v_1.AuxInt != 31 { break } @@ -12205,13 +12072,14 @@ func rewriteValue386_Op386SHLL_0(v *Value) bool { return false } func rewriteValue386_Op386SHLLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHLLconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -12220,13 +12088,13 @@ func rewriteValue386_Op386SHLLconst_0(v *Value) bool { return false } func rewriteValue386_Op386SHRB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SHRB x (MOVLconst [c])) // cond: c&31 < 8 // result: (SHRBconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -12243,8 +12111,6 @@ func rewriteValue386_Op386SHRB_0(v *Value) bool { // cond: c&31 >= 8 // result: (MOVLconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != Op386MOVLconst { break } @@ -12259,13 +12125,14 @@ func rewriteValue386_Op386SHRB_0(v *Value) bool { return false } func rewriteValue386_Op386SHRBconst_0(v *Value) bool { 
+ v_0 := v.Args[0] // match: (SHRBconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -12274,12 +12141,12 @@ func rewriteValue386_Op386SHRBconst_0(v *Value) bool { return false } func rewriteValue386_Op386SHRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SHRL x (MOVLconst [c])) // result: (SHRLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -12292,9 +12159,7 @@ func rewriteValue386_Op386SHRL_0(v *Value) bool { // match: (SHRL x (ANDLconst [31] y)) // result: (SHRL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386ANDLconst || v_1.AuxInt != 31 { break } @@ -12307,13 +12172,14 @@ func rewriteValue386_Op386SHRL_0(v *Value) bool { return false } func rewriteValue386_Op386SHRLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHRLconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -12322,13 +12188,13 @@ func rewriteValue386_Op386SHRLconst_0(v *Value) bool { return false } func rewriteValue386_Op386SHRW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SHRW x (MOVLconst [c])) // cond: c&31 < 16 // result: (SHRWconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -12345,8 +12211,6 @@ func rewriteValue386_Op386SHRW_0(v *Value) bool { // cond: c&31 >= 16 // result: (MOVLconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != Op386MOVLconst { break } @@ -12361,13 +12225,14 @@ func rewriteValue386_Op386SHRW_0(v *Value) bool { return false } func rewriteValue386_Op386SHRWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHRWconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -12376,13 +12241,13 @@ func rewriteValue386_Op386SHRWconst_0(v *Value) bool { return false } func rewriteValue386_Op386SUBL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBL x (MOVLconst [c])) // result: (SUBLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -12395,12 +12260,11 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { // match: (SUBL (MOVLconst [c]) x) // result: (NEGL (SUBLconst x [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } c := v_0.AuxInt + x := v_1 v.reset(Op386NEGL) v0 := b.NewValue0(v.Pos, Op386SUBLconst, v.Type) v0.AuxInt = c @@ -12412,9 +12276,8 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVLload { break } @@ -12437,9 +12300,8 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLloadidx4 x [off] {sym} ptr idx mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVLloadidx4 { break } @@ -12463,8 +12325,8 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { // match: (SUBL x x) // result: (MOVLconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(Op386MOVLconst) @@ -12474,12 +12336,12 @@ func rewriteValue386_Op386SUBL_0(v 
*Value) bool { return false } func rewriteValue386_Op386SUBLcarry_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBLcarry x (MOVLconst [c])) // result: (SUBLconstcarry [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -12492,12 +12354,13 @@ func rewriteValue386_Op386SUBLcarry_0(v *Value) bool { return false } func rewriteValue386_Op386SUBLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBLconst [c] x) // cond: int32(c) == 0 // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -12510,7 +12373,7 @@ func rewriteValue386_Op386SUBLconst_0(v *Value) bool { // result: (ADDLconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 v.reset(Op386ADDLconst) v.AuxInt = int64(int32(-c)) v.AddArg(x) @@ -12518,6 +12381,9 @@ func rewriteValue386_Op386SUBLconst_0(v *Value) bool { } } func rewriteValue386_Op386SUBLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -12526,14 +12392,13 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -12551,15 +12416,14 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12577,9 +12441,7 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL4 { break } @@ -12587,6 +12449,7 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool { sym2 := v_1.Aux idx := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -12602,6 +12465,10 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool { return false } func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) @@ -12610,15 +12477,14 @@ func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -12637,15 +12503,14 @@ func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - base := v.Args[1] - v_2 := v.Args[2] + val := v_0 + base := v_1 if v_2.Op != Op386ADDLconst { break } off2 := v_2.AuxInt idx := v_2.Args[0] + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -12664,16 +12529,15 @@ func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 
:= v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12689,6 +12553,9 @@ func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem) @@ -12697,14 +12564,13 @@ func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -12722,15 +12588,14 @@ func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12745,6 +12610,10 @@ func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { return false } func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) @@ -12753,15 +12622,14 @@ func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -12780,15 +12648,14 @@ func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -12807,16 +12674,15 @@ func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12835,14 +12701,13 @@ func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != Op386MOVLconst { break } c := v_2.AuxInt + mem := v_3 if !(validValAndOff(-c, off)) { break } @@ -12857,15 +12722,16 @@ func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386SUBSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (SUBSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVSDload 
{ break } @@ -12887,6 +12753,9 @@ func rewriteValue386_Op386SUBSD_0(v *Value) bool { return false } func rewriteValue386_Op386SUBSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -12895,14 +12764,13 @@ func rewriteValue386_Op386SUBSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -12920,15 +12788,14 @@ func rewriteValue386_Op386SUBSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12943,15 +12810,16 @@ func rewriteValue386_Op386SUBSDload_0(v *Value) bool { return false } func rewriteValue386_Op386SUBSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (SUBSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != Op386MOVSSload { break } @@ -12973,6 +12841,9 @@ func rewriteValue386_Op386SUBSS_0(v *Value) bool { return false } func rewriteValue386_Op386SUBSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -12981,14 +12852,13 @@ func rewriteValue386_Op386SUBSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -13006,15 +12876,14 @@ func rewriteValue386_Op386SUBSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13029,13 +12898,13 @@ func rewriteValue386_Op386SUBSSload_0(v *Value) bool { return false } func rewriteValue386_Op386XORL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XORL x (MOVLconst [c])) // result: (XORLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != Op386MOVLconst { continue } @@ -13051,15 +12920,12 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // cond: d == 32-c // result: (ROLLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRLconst { continue } @@ -13079,15 +12945,12 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // result: (ROLWconst x [c]) for { t := v.Type - _ = v.Args[1] - 
for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRWconst { continue } @@ -13107,15 +12970,12 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // result: (ROLBconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != Op386SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != Op386SHRBconst { continue } @@ -13134,10 +12994,9 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLload { continue } @@ -13162,10 +13021,9 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLloadidx4 x [off] {sym} ptr idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != Op386MOVLloadidx4 { continue } @@ -13191,8 +13049,8 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { // match: (XORL x x) // result: (MOVLconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(Op386MOVLconst) @@ -13202,11 +13060,11 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { return false } func rewriteValue386_Op386XORLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORLconst [c] (XORLconst [d] x)) // result: (XORLconst [c ^ d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386XORLconst { break } @@ -13222,7 +13080,7 @@ func rewriteValue386_Op386XORLconst_0(v *Value) bool { // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -13235,7 +13093,6 @@ func rewriteValue386_Op386XORLconst_0(v *Value) bool { // result: (MOVLconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != Op386MOVLconst { break } @@ -13247,6 +13104,8 @@ func rewriteValue386_Op386XORLconst_0(v *Value) bool { return false } func rewriteValue386_Op386XORLconstmodify_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) @@ -13255,13 +13114,12 @@ func rewriteValue386_Op386XORLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -13278,14 +13136,13 @@ func rewriteValue386_Op386XORLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13299,6 +13156,9 @@ func rewriteValue386_Op386XORLconstmodify_0(v *Value) bool { return false } func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := 
v.Block config := b.Func.Config // match: (XORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) @@ -13307,14 +13167,13 @@ func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -13332,14 +13191,13 @@ func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2 * 4)) { break } @@ -13357,15 +13215,14 @@ func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13380,6 +13237,9 @@ func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool { return false } func rewriteValue386_Op386XORLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem) @@ -13388,14 +13248,13 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -13413,15 +13272,14 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13439,9 +13297,7 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL4 { break } @@ -13449,6 +13305,7 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { sym2 := v_1.Aux idx := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13464,6 +13321,10 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { return false } func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (XORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) @@ -13472,15 +13333,14 @@ func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -13499,15 +13359,14 @@ func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - val := v.Args[0] - base := v.Args[1] - v_2 := 
v.Args[2] + val := v_0 + base := v_1 if v_2.Op != Op386ADDLconst { break } off2 := v_2.AuxInt idx := v_2.Args[0] + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -13526,16 +13385,15 @@ func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - idx := v.Args[2] + idx := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13551,6 +13409,9 @@ func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool { return false } func rewriteValue386_Op386XORLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (XORLmodify [off1] {sym} (ADDLconst [off2] base) val mem) @@ -13559,14 +13420,13 @@ func rewriteValue386_Op386XORLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -13584,15 +13444,14 @@ func rewriteValue386_Op386XORLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13607,6 +13466,10 @@ func rewriteValue386_Op386XORLmodify_0(v *Value) bool { return false } func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (XORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) @@ -13615,15 +13478,14 @@ func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2)) { break } @@ -13642,15 +13504,14 @@ func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[3] - base := v.Args[0] - v_1 := v.Args[1] + base := v_0 if v_1.Op != Op386ADDLconst { break } off2 := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(off1 + off2*4)) { break } @@ -13669,16 +13530,15 @@ func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13697,14 +13557,13 @@ func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != Op386MOVLconst { break } c := v_2.AuxInt + mem := v_3 if !(validValAndOff(c, off)) { break } @@ -13719,11 +13578,13 @@ func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { return false } func 
rewriteValue386_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add16 x y) // result: (ADDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDL) v.AddArg(x) v.AddArg(y) @@ -13731,11 +13592,13 @@ func rewriteValue386_OpAdd16_0(v *Value) bool { } } func rewriteValue386_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32 x y) // result: (ADDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDL) v.AddArg(x) v.AddArg(y) @@ -13743,11 +13606,13 @@ func rewriteValue386_OpAdd32_0(v *Value) bool { } } func rewriteValue386_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F x y) // result: (ADDSS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDSS) v.AddArg(x) v.AddArg(y) @@ -13755,11 +13620,13 @@ func rewriteValue386_OpAdd32F_0(v *Value) bool { } } func rewriteValue386_OpAdd32carry_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32carry x y) // result: (ADDLcarry x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDLcarry) v.AddArg(x) v.AddArg(y) @@ -13767,12 +13634,15 @@ func rewriteValue386_OpAdd32carry_0(v *Value) bool { } } func rewriteValue386_OpAdd32withcarry_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32withcarry x y c) // result: (ADCL x y c) for { - c := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + c := v_2 v.reset(Op386ADCL) v.AddArg(x) v.AddArg(y) @@ -13781,11 +13651,13 @@ func rewriteValue386_OpAdd32withcarry_0(v *Value) bool { } } func rewriteValue386_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F x y) // result: (ADDSD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDSD) v.AddArg(x) v.AddArg(y) @@ -13793,11 +13665,13 @@ func rewriteValue386_OpAdd64F_0(v *Value) bool { } } func rewriteValue386_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add8 x y) // result: (ADDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDL) v.AddArg(x) v.AddArg(y) @@ -13805,11 +13679,13 @@ func rewriteValue386_OpAdd8_0(v *Value) bool { } } func rewriteValue386_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x y) // result: (ADDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ADDL) v.AddArg(x) v.AddArg(y) @@ -13817,11 +13693,12 @@ func rewriteValue386_OpAddPtr_0(v *Value) bool { } } func rewriteValue386_OpAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (Addr {sym} base) // result: (LEAL {sym} base) for { sym := v.Aux - base := v.Args[0] + base := v_0 v.reset(Op386LEAL) v.Aux = sym v.AddArg(base) @@ -13829,11 +13706,13 @@ func rewriteValue386_OpAddr_0(v *Value) bool { } } func rewriteValue386_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And16 x y) // result: (ANDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v.AddArg(x) v.AddArg(y) @@ -13841,11 +13720,13 @@ func rewriteValue386_OpAnd16_0(v *Value) bool { } } func rewriteValue386_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And32 x y) // result: (ANDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v.AddArg(x) v.AddArg(y) @@ -13853,11 +13734,13 @@ func rewriteValue386_OpAnd32_0(v *Value) bool { } } func 
rewriteValue386_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (ANDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v.AddArg(x) v.AddArg(y) @@ -13865,11 +13748,13 @@ func rewriteValue386_OpAnd8_0(v *Value) bool { } } func rewriteValue386_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (ANDL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v.AddArg(x) v.AddArg(y) @@ -13877,11 +13762,13 @@ func rewriteValue386_OpAndB_0(v *Value) bool { } } func rewriteValue386_OpAvg32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Avg32u x y) // result: (AVGLU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386AVGLU) v.AddArg(x) v.AddArg(y) @@ -13889,23 +13776,27 @@ func rewriteValue386_OpAvg32u_0(v *Value) bool { } } func rewriteValue386_OpBswap32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Bswap32 x) // result: (BSWAPL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386BSWAPL) v.AddArg(x) return true } } func rewriteValue386_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(Op386CALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -13915,30 +13806,33 @@ func rewriteValue386_OpClosureCall_0(v *Value) bool { } } func rewriteValue386_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (NOTL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386NOTL) v.AddArg(x) return true } } func rewriteValue386_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (NOTL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386NOTL) v.AddArg(x) return true } } func rewriteValue386_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (NOTL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386NOTL) v.AddArg(x) return true @@ -14014,12 +13908,13 @@ func rewriteValue386_OpConstNil_0(v *Value) bool { } } func rewriteValue386_OpCtz16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) // result: (BSFL (ORLconst [0x10000] x)) for { - x := v.Args[0] + x := v_0 v.reset(Op386BSFL) v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32) v0.AuxInt = 0x10000 @@ -14029,82 +13924,91 @@ func rewriteValue386_OpCtz16_0(v *Value) bool { } } func rewriteValue386_OpCtz16NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz16NonZero x) // result: (BSFL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386BSFL) v.AddArg(x) return true } } func rewriteValue386_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (CVTTSS2SL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386CVTTSS2SL) v.AddArg(x) return true } } func rewriteValue386_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (CVTSS2SD x) for { - x := v.Args[0] + x := v_0 v.reset(Op386CVTSS2SD) v.AddArg(x) return true } } func rewriteValue386_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (CVTSL2SS x) for { - x := v.Args[0] + x := v_0 v.reset(Op386CVTSL2SS) v.AddArg(x) return true } } func rewriteValue386_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // 
result: (CVTSL2SD x) for { - x := v.Args[0] + x := v_0 v.reset(Op386CVTSL2SD) v.AddArg(x) return true } } func rewriteValue386_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (CVTTSD2SL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386CVTTSD2SL) v.AddArg(x) return true } } func rewriteValue386_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (CVTSD2SS x) for { - x := v.Args[0] + x := v_0 v.reset(Op386CVTSD2SS) v.AddArg(x) return true } } func rewriteValue386_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div16 [a] x y) // result: (DIVW [a] x y) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVW) v.AuxInt = a v.AddArg(x) @@ -14113,11 +14017,13 @@ func rewriteValue386_OpDiv16_0(v *Value) bool { } } func rewriteValue386_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div16u x y) // result: (DIVWU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVWU) v.AddArg(x) v.AddArg(y) @@ -14125,12 +14031,14 @@ func rewriteValue386_OpDiv16u_0(v *Value) bool { } } func rewriteValue386_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32 [a] x y) // result: (DIVL [a] x y) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVL) v.AuxInt = a v.AddArg(x) @@ -14139,11 +14047,13 @@ func rewriteValue386_OpDiv32_0(v *Value) bool { } } func rewriteValue386_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (DIVSS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVSS) v.AddArg(x) v.AddArg(y) @@ -14151,11 +14061,13 @@ func rewriteValue386_OpDiv32F_0(v *Value) bool { } } func rewriteValue386_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32u x y) // result: (DIVLU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVLU) v.AddArg(x) v.AddArg(y) @@ -14163,11 +14075,13 @@ func rewriteValue386_OpDiv32u_0(v *Value) bool { } } func rewriteValue386_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (DIVSD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVSD) v.AddArg(x) v.AddArg(y) @@ -14175,13 +14089,15 @@ func rewriteValue386_OpDiv64F_0(v *Value) bool { } } func rewriteValue386_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (DIVW (SignExt8to16 x) (SignExt8to16 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v0.AddArg(x) @@ -14193,13 +14109,15 @@ func rewriteValue386_OpDiv8_0(v *Value) bool { } } func rewriteValue386_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v0.AddArg(x) @@ -14211,12 +14129,14 @@ func rewriteValue386_OpDiv8u_0(v *Value) bool { } } func rewriteValue386_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq16 x y) // result: (SETEQ (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQ) v0 := 
b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14226,12 +14146,14 @@ func rewriteValue386_OpEq16_0(v *Value) bool { } } func rewriteValue386_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32 x y) // result: (SETEQ (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14241,12 +14163,14 @@ func rewriteValue386_OpEq32_0(v *Value) bool { } } func rewriteValue386_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (SETEQF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -14256,12 +14180,14 @@ func rewriteValue386_OpEq32F_0(v *Value) bool { } } func rewriteValue386_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (SETEQF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -14271,12 +14197,14 @@ func rewriteValue386_OpEq64F_0(v *Value) bool { } } func rewriteValue386_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq8 x y) // result: (SETEQ (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14286,12 +14214,14 @@ func rewriteValue386_OpEq8_0(v *Value) bool { } } func rewriteValue386_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqB x y) // result: (SETEQ (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14301,12 +14231,14 @@ func rewriteValue386_OpEqB_0(v *Value) bool { } } func rewriteValue386_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqPtr x y) // result: (SETEQ (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETEQ) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14316,12 +14248,14 @@ func rewriteValue386_OpEqPtr_0(v *Value) bool { } } func rewriteValue386_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq16 x y) // result: (SETGE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14331,12 +14265,14 @@ func rewriteValue386_OpGeq16_0(v *Value) bool { } } func rewriteValue386_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq16U x y) // result: (SETAE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETAE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14346,12 +14282,14 @@ func rewriteValue386_OpGeq16U_0(v *Value) bool { } } func rewriteValue386_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32 x y) // result: (SETGE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14361,12 +14299,14 @@ func rewriteValue386_OpGeq32_0(v *Value) bool { } } func 
rewriteValue386_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (SETGEF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -14376,12 +14316,14 @@ func rewriteValue386_OpGeq32F_0(v *Value) bool { } } func rewriteValue386_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32U x y) // result: (SETAE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETAE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14391,12 +14333,14 @@ func rewriteValue386_OpGeq32U_0(v *Value) bool { } } func rewriteValue386_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (SETGEF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -14406,12 +14350,14 @@ func rewriteValue386_OpGeq64F_0(v *Value) bool { } } func rewriteValue386_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq8 x y) // result: (SETGE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14421,12 +14367,14 @@ func rewriteValue386_OpGeq8_0(v *Value) bool { } } func rewriteValue386_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq8U x y) // result: (SETAE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETAE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14460,22 +14408,25 @@ func rewriteValue386_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValue386_OpGetG_0(v *Value) bool { + v_0 := v.Args[0] // match: (GetG mem) // result: (LoweredGetG mem) for { - mem := v.Args[0] + mem := v_0 v.reset(Op386LoweredGetG) v.AddArg(mem) return true } } func rewriteValue386_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater16 x y) // result: (SETG (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETG) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14485,12 +14436,14 @@ func rewriteValue386_OpGreater16_0(v *Value) bool { } } func rewriteValue386_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater16U x y) // result: (SETA (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETA) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14500,12 +14453,14 @@ func rewriteValue386_OpGreater16U_0(v *Value) bool { } } func rewriteValue386_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32 x y) // result: (SETG (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETG) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14515,12 +14470,14 @@ func rewriteValue386_OpGreater32_0(v *Value) bool { } } func rewriteValue386_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (SETGF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, 
Op386UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -14530,12 +14487,14 @@ func rewriteValue386_OpGreater32F_0(v *Value) bool { } } func rewriteValue386_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32U x y) // result: (SETA (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETA) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14545,12 +14504,14 @@ func rewriteValue386_OpGreater32U_0(v *Value) bool { } } func rewriteValue386_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (SETGF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -14560,12 +14521,14 @@ func rewriteValue386_OpGreater64F_0(v *Value) bool { } } func rewriteValue386_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater8 x y) // result: (SETG (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETG) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14575,12 +14538,14 @@ func rewriteValue386_OpGreater8_0(v *Value) bool { } } func rewriteValue386_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater8U x y) // result: (SETA (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETA) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14590,11 +14555,13 @@ func rewriteValue386_OpGreater8U_0(v *Value) bool { } } func rewriteValue386_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul32 x y) // result: (HMULL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386HMULL) v.AddArg(x) v.AddArg(y) @@ -14602,11 +14569,13 @@ func rewriteValue386_OpHmul32_0(v *Value) bool { } } func rewriteValue386_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul32u x y) // result: (HMULLU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386HMULLU) v.AddArg(x) v.AddArg(y) @@ -14614,12 +14583,14 @@ func rewriteValue386_OpHmul32u_0(v *Value) bool { } } func rewriteValue386_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(Op386CALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -14628,12 +14599,14 @@ func rewriteValue386_OpInterCall_0(v *Value) bool { } } func rewriteValue386_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsInBounds idx len) // result: (SETB (CMPL idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(idx) @@ -14643,11 +14616,12 @@ func rewriteValue386_OpIsInBounds_0(v *Value) bool { } } func rewriteValue386_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (IsNonNil p) // result: (SETNE (TESTL p p)) for { - p := v.Args[0] + p := v_0 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags) v0.AddArg(p) @@ -14657,12 +14631,14 @@ func rewriteValue386_OpIsNonNil_0(v *Value) bool { } } func rewriteValue386_OpIsSliceInBounds_0(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsSliceInBounds idx len) // result: (SETBE (CMPL idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(idx) @@ -14672,12 +14648,14 @@ func rewriteValue386_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValue386_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq16 x y) // result: (SETLE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14687,12 +14665,14 @@ func rewriteValue386_OpLeq16_0(v *Value) bool { } } func rewriteValue386_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq16U x y) // result: (SETBE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14702,12 +14682,14 @@ func rewriteValue386_OpLeq16U_0(v *Value) bool { } } func rewriteValue386_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32 x y) // result: (SETLE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14717,12 +14699,14 @@ func rewriteValue386_OpLeq32_0(v *Value) bool { } } func rewriteValue386_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) // result: (SETGEF (UCOMISS y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) v0.AddArg(y) @@ -14732,12 +14716,14 @@ func rewriteValue386_OpLeq32F_0(v *Value) bool { } } func rewriteValue386_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32U x y) // result: (SETBE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14747,12 +14733,14 @@ func rewriteValue386_OpLeq32U_0(v *Value) bool { } } func rewriteValue386_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (SETGEF (UCOMISD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) v0.AddArg(y) @@ -14762,12 +14750,14 @@ func rewriteValue386_OpLeq64F_0(v *Value) bool { } } func rewriteValue386_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq8 x y) // result: (SETLE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETLE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14777,12 +14767,14 @@ func rewriteValue386_OpLeq8_0(v *Value) bool { } } func rewriteValue386_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq8U x y) // result: (SETBE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETBE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14792,12 +14784,14 @@ func rewriteValue386_OpLeq8U_0(v *Value) bool { } } func rewriteValue386_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less16 x y) // result: (SETL (CMPW x y)) for 
{ - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14807,12 +14801,14 @@ func rewriteValue386_OpLess16_0(v *Value) bool { } } func rewriteValue386_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less16U x y) // result: (SETB (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -14822,12 +14818,14 @@ func rewriteValue386_OpLess16U_0(v *Value) bool { } } func rewriteValue386_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32 x y) // result: (SETL (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14837,12 +14835,14 @@ func rewriteValue386_OpLess32_0(v *Value) bool { } } func rewriteValue386_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (SETGF (UCOMISS y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) v0.AddArg(y) @@ -14852,12 +14852,14 @@ func rewriteValue386_OpLess32F_0(v *Value) bool { } } func rewriteValue386_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32U x y) // result: (SETB (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -14867,12 +14869,14 @@ func rewriteValue386_OpLess32U_0(v *Value) bool { } } func rewriteValue386_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (SETGF (UCOMISD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETGF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) v0.AddArg(y) @@ -14882,12 +14886,14 @@ func rewriteValue386_OpLess64F_0(v *Value) bool { } } func rewriteValue386_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less8 x y) // result: (SETL (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETL) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14897,12 +14903,14 @@ func rewriteValue386_OpLess8_0(v *Value) bool { } } func rewriteValue386_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less8U x y) // result: (SETB (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETB) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -14912,13 +14920,15 @@ func rewriteValue386_OpLess8U_0(v *Value) bool { } } func rewriteValue386_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: (is32BitInt(t) || isPtr(t)) // result: (MOVLload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) || isPtr(t)) { break } @@ -14932,8 +14942,8 @@ func rewriteValue386_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t)) { break } @@ -14947,8 +14957,8 @@ func rewriteValue386_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := 
v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean() || is8BitInt(t)) { break } @@ -14962,8 +14972,8 @@ func rewriteValue386_OpLoad_0(v *Value) bool { // result: (MOVSSload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -14977,8 +14987,8 @@ func rewriteValue386_OpLoad_0(v *Value) bool { // result: (MOVSDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -14990,12 +15000,12 @@ func rewriteValue386_OpLoad_0(v *Value) bool { return false } func rewriteValue386_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (LEAL {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(Op386LEAL) v.Aux = sym v.AddArg(base) @@ -15003,13 +15013,15 @@ func rewriteValue386_OpLocalAddr_0(v *Value) bool { } } func rewriteValue386_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x16 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15025,13 +15037,15 @@ func rewriteValue386_OpLsh16x16_0(v *Value) bool { } } func rewriteValue386_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x32 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15047,13 +15061,13 @@ func rewriteValue386_OpLsh16x32_0(v *Value) bool { } } func rewriteValue386_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SHLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15070,8 +15084,6 @@ func rewriteValue386_OpLsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (Const16 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -15086,13 +15098,15 @@ func rewriteValue386_OpLsh16x64_0(v *Value) bool { return false } func rewriteValue386_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x8 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15108,13 +15122,15 @@ func rewriteValue386_OpLsh16x8_0(v *Value) bool { } } func rewriteValue386_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x16 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15130,13 +15146,15 @@ func rewriteValue386_OpLsh32x16_0(v *Value) bool { } } func rewriteValue386_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x32 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15152,13 +15170,13 @@ func 
rewriteValue386_OpLsh32x32_0(v *Value) bool { } } func rewriteValue386_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SHLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15175,8 +15193,6 @@ func rewriteValue386_OpLsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -15191,13 +15207,15 @@ func rewriteValue386_OpLsh32x64_0(v *Value) bool { return false } func rewriteValue386_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x8 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15213,13 +15231,15 @@ func rewriteValue386_OpLsh32x8_0(v *Value) bool { } } func rewriteValue386_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x16 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15235,13 +15255,15 @@ func rewriteValue386_OpLsh8x16_0(v *Value) bool { } } func rewriteValue386_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x32 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15257,13 +15279,13 @@ func rewriteValue386_OpLsh8x32_0(v *Value) bool { } } func rewriteValue386_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SHLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15280,8 +15302,6 @@ func rewriteValue386_OpLsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (Const8 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -15296,13 +15316,15 @@ func rewriteValue386_OpLsh8x64_0(v *Value) bool { return false } func rewriteValue386_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x8 x y) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHLL, t) v0.AddArg(x) @@ -15318,12 +15340,14 @@ func rewriteValue386_OpLsh8x8_0(v *Value) bool { } } func rewriteValue386_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod16 [a] x y) // result: (MODW [a] x y) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MODW) v.AuxInt = a v.AddArg(x) @@ -15332,11 +15356,13 @@ func rewriteValue386_OpMod16_0(v *Value) bool { } } func rewriteValue386_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod16u x y) // result: (MODWU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MODWU) v.AddArg(x) v.AddArg(y) @@ -15344,12 +15370,14 @@ func rewriteValue386_OpMod16u_0(v *Value) bool { } } func rewriteValue386_OpMod32_0(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] // match: (Mod32 [a] x y) // result: (MODL [a] x y) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MODL) v.AuxInt = a v.AddArg(x) @@ -15358,11 +15386,13 @@ func rewriteValue386_OpMod32_0(v *Value) bool { } } func rewriteValue386_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod32u x y) // result: (MODLU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MODLU) v.AddArg(x) v.AddArg(y) @@ -15370,13 +15400,15 @@ func rewriteValue386_OpMod32u_0(v *Value) bool { } } func rewriteValue386_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (MODW (SignExt8to16 x) (SignExt8to16 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MODW) v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) v0.AddArg(x) @@ -15388,13 +15420,15 @@ func rewriteValue386_OpMod8_0(v *Value) bool { } } func rewriteValue386_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MODWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) v0.AddArg(x) @@ -15406,6 +15440,9 @@ func rewriteValue386_OpMod8u_0(v *Value) bool { } } func rewriteValue386_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -15414,7 +15451,7 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -15426,9 +15463,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) @@ -15444,9 +15481,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVWstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) @@ -15462,9 +15499,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVLstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) @@ -15480,9 +15517,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -15507,9 +15544,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVBstore) v.AuxInt = 4 v.AddArg(dst) @@ -15534,9 +15571,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVWstore) v.AuxInt = 4 v.AddArg(dst) @@ -15561,9 +15598,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + 
src := v_1 + mem := v_2 v.reset(Op386MOVLstore) v.AuxInt = 3 v.AddArg(dst) @@ -15588,9 +15625,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(Op386MOVLstore) v.AuxInt = 4 v.AddArg(dst) @@ -15614,9 +15651,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { // result: (Move [s-s%4] (ADDLconst dst [s%4]) (ADDLconst src [s%4]) (MOVLstore dst (MOVLload src mem) mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 8 && s%4 != 0) { break } @@ -15643,6 +15680,9 @@ func rewriteValue386_OpMove_0(v *Value) bool { return false } func rewriteValue386_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -15651,9 +15691,9 @@ func rewriteValue386_OpMove_10(v *Value) bool { // result: (DUFFCOPY [10*(128-s/4)] dst src mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) { break } @@ -15669,9 +15709,9 @@ func rewriteValue386_OpMove_10(v *Value) bool { // result: (REPMOVSL dst src (MOVLconst [s/4]) mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !((s > 4*128 || config.noDuffDevice) && s%4 == 0) { break } @@ -15687,11 +15727,13 @@ func rewriteValue386_OpMove_10(v *Value) bool { return false } func rewriteValue386_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (MULL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MULL) v.AddArg(x) v.AddArg(y) @@ -15699,11 +15741,13 @@ func rewriteValue386_OpMul16_0(v *Value) bool { } } func rewriteValue386_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MULL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MULL) v.AddArg(x) v.AddArg(y) @@ -15711,11 +15755,13 @@ func rewriteValue386_OpMul32_0(v *Value) bool { } } func rewriteValue386_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (MULSS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MULSS) v.AddArg(x) v.AddArg(y) @@ -15723,11 +15769,13 @@ func rewriteValue386_OpMul32F_0(v *Value) bool { } } func rewriteValue386_OpMul32uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32uhilo x y) // result: (MULLQU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MULLQU) v.AddArg(x) v.AddArg(y) @@ -15735,11 +15783,13 @@ func rewriteValue386_OpMul32uhilo_0(v *Value) bool { } } func rewriteValue386_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (MULSD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MULSD) v.AddArg(x) v.AddArg(y) @@ -15747,11 +15797,13 @@ func rewriteValue386_OpMul64F_0(v *Value) bool { } } func rewriteValue386_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (MULL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386MULL) v.AddArg(x) v.AddArg(y) @@ -15759,26 +15811,29 @@ func rewriteValue386_OpMul8_0(v *Value) bool { } } func rewriteValue386_OpNeg16_0(v *Value) 
bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (NEGL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v.AddArg(x) return true } } func rewriteValue386_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (NEGL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v.AddArg(x) return true } } func rewriteValue386_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -15786,7 +15841,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool { // cond: !config.use387 // result: (PXOR x (MOVSSconst [auxFrom32F(float32(math.Copysign(0, -1)))])) for { - x := v.Args[0] + x := v_0 if !(!config.use387) { break } @@ -15801,7 +15856,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool { // cond: config.use387 // result: (FCHS x) for { - x := v.Args[0] + x := v_0 if !(config.use387) { break } @@ -15812,6 +15867,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool { return false } func rewriteValue386_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -15819,7 +15875,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool { // cond: !config.use387 // result: (PXOR x (MOVSDconst [auxFrom64F(math.Copysign(0, -1))])) for { - x := v.Args[0] + x := v_0 if !(!config.use387) { break } @@ -15834,7 +15890,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool { // cond: config.use387 // result: (FCHS x) for { - x := v.Args[0] + x := v_0 if !(config.use387) { break } @@ -15845,22 +15901,25 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool { return false } func rewriteValue386_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (NEGL x) for { - x := v.Args[0] + x := v_0 v.reset(Op386NEGL) v.AddArg(x) return true } } func rewriteValue386_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq16 x y) // result: (SETNE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) v0.AddArg(x) @@ -15870,12 +15929,14 @@ func rewriteValue386_OpNeq16_0(v *Value) bool { } } func rewriteValue386_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32 x y) // result: (SETNE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -15885,12 +15946,14 @@ func rewriteValue386_OpNeq32_0(v *Value) bool { } } func rewriteValue386_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32F x y) // result: (SETNEF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNEF) v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -15900,12 +15963,14 @@ func rewriteValue386_OpNeq32F_0(v *Value) bool { } } func rewriteValue386_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64F x y) // result: (SETNEF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNEF) v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -15915,12 +15980,14 @@ func rewriteValue386_OpNeq64F_0(v *Value) bool { } } func rewriteValue386_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq8 x y) // result: (SETNE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNE) 
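Every hunk in this regenerated file has the same mechanical shape, which is easier to read as a whole function than as diff fragments. The sketch below is an illustrative reconstruction, not an excerpt of the patch (exact gofmt layout assumed), of how a two-argument lowering rule such as the Neq8 rule in the surrounding hunk comes out after regeneration: the argument loads are hoisted to the top of the function, highest index first, and the rule body refers to the hoisted v_0 and v_1 instead of re-reading v.Args inside the loop.

func rewriteValue386_OpNeq8_0(v *Value) bool {
	v_1 := v.Args[1] // hoisted loads, last index first, presumably so one bounds check covers both
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq8 x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(Op386SETNE)
		v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}

The function still reports whether a rule fired; only the source of the argument values changes.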
v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -15930,12 +15997,14 @@ func rewriteValue386_OpNeq8_0(v *Value) bool { } } func rewriteValue386_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqB x y) // result: (SETNE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) v0.AddArg(x) @@ -15945,12 +16014,14 @@ func rewriteValue386_OpNeqB_0(v *Value) bool { } } func rewriteValue386_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqPtr x y) // result: (SETNE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SETNE) v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) v0.AddArg(x) @@ -15960,11 +16031,13 @@ func rewriteValue386_OpNeqPtr_0(v *Value) bool { } } func rewriteValue386_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(Op386LoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -15972,10 +16045,11 @@ func rewriteValue386_OpNilCheck_0(v *Value) bool { } } func rewriteValue386_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORLconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(Op386XORLconst) v.AuxInt = 1 v.AddArg(x) @@ -15983,11 +16057,12 @@ func rewriteValue386_OpNot_0(v *Value) bool { } } func rewriteValue386_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr [off] ptr) // result: (ADDLconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(Op386ADDLconst) v.AuxInt = off v.AddArg(ptr) @@ -15995,11 +16070,13 @@ func rewriteValue386_OpOffPtr_0(v *Value) bool { } } func rewriteValue386_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (ORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ORL) v.AddArg(x) v.AddArg(y) @@ -16007,11 +16084,13 @@ func rewriteValue386_OpOr16_0(v *Value) bool { } } func rewriteValue386_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (ORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ORL) v.AddArg(x) v.AddArg(y) @@ -16019,11 +16098,13 @@ func rewriteValue386_OpOr32_0(v *Value) bool { } } func rewriteValue386_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (ORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ORL) v.AddArg(x) v.AddArg(y) @@ -16031,11 +16112,13 @@ func rewriteValue386_OpOr8_0(v *Value) bool { } } func rewriteValue386_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (ORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ORL) v.AddArg(x) v.AddArg(y) @@ -16043,14 +16126,17 @@ func rewriteValue386_OpOrB_0(v *Value) bool { } } func rewriteValue386_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -16066,9 +16152,9 @@ func rewriteValue386_OpPanicBounds_0(v *Value) bool { // result: 
(LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -16084,9 +16170,9 @@ func rewriteValue386_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -16100,15 +16186,19 @@ func rewriteValue386_OpPanicBounds_0(v *Value) bool { return false } func rewriteValue386_OpPanicExtend_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicExtend [kind] hi lo y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicExtendA [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 0) { break } @@ -16125,10 +16215,10 @@ func rewriteValue386_OpPanicExtend_0(v *Value) bool { // result: (LoweredPanicExtendB [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 1) { break } @@ -16145,10 +16235,10 @@ func rewriteValue386_OpPanicExtend_0(v *Value) bool { // result: (LoweredPanicExtendC [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 2) { break } @@ -16163,12 +16253,12 @@ func rewriteValue386_OpPanicExtend_0(v *Value) bool { return false } func rewriteValue386_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft16 x (MOVLconst [c])) // result: (ROLWconst [c&15] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -16181,12 +16271,12 @@ func rewriteValue386_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValue386_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft32 x (MOVLconst [c])) // result: (ROLLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -16199,12 +16289,12 @@ func rewriteValue386_OpRotateLeft32_0(v *Value) bool { return false } func rewriteValue386_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft8 x (MOVLconst [c])) // result: (ROLBconst [c&7] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != Op386MOVLconst { break } @@ -16217,10 +16307,11 @@ func rewriteValue386_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValue386_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -16228,10 +16319,11 @@ func rewriteValue386_OpRound32F_0(v *Value) bool { } } func rewriteValue386_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -16239,13 +16331,15 @@ func rewriteValue386_OpRound64F_0(v *Value) bool { } } func rewriteValue386_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux16 x y) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { t := 
v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) v0.AddArg(x) @@ -16261,13 +16355,15 @@ func rewriteValue386_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValue386_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux32 x y) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) v0.AddArg(x) @@ -16283,13 +16379,13 @@ func rewriteValue386_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValue386_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh16Ux64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SHRWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16306,8 +16402,6 @@ func rewriteValue386_OpRsh16Ux64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (Const16 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16322,13 +16416,15 @@ func rewriteValue386_OpRsh16Ux64_0(v *Value) bool { return false } func rewriteValue386_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux8 x y) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRW, t) v0.AddArg(x) @@ -16344,13 +16440,15 @@ func rewriteValue386_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValue386_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x16 x y) // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARW) v.Type = t v.AddArg(x) @@ -16369,13 +16467,15 @@ func rewriteValue386_OpRsh16x16_0(v *Value) bool { } } func rewriteValue386_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x32 x y) // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARW) v.Type = t v.AddArg(x) @@ -16394,13 +16494,13 @@ func rewriteValue386_OpRsh16x32_0(v *Value) bool { } } func rewriteValue386_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SARWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16417,9 +16517,7 @@ func rewriteValue386_OpRsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (SARWconst x [15]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16435,13 +16533,15 @@ func rewriteValue386_OpRsh16x64_0(v *Value) bool { return false } func rewriteValue386_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x8 x y) // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARW) v.Type = t v.AddArg(x) @@ -16460,13 +16560,15 @@ func rewriteValue386_OpRsh16x8_0(v *Value) bool { } } func rewriteValue386_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux16 x y) // result: 
(ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) v0.AddArg(x) @@ -16482,13 +16584,15 @@ func rewriteValue386_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValue386_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux32 x y) // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) v0.AddArg(x) @@ -16504,13 +16608,13 @@ func rewriteValue386_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValue386_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh32Ux64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SHRLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16527,8 +16631,6 @@ func rewriteValue386_OpRsh32Ux64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16543,13 +16645,15 @@ func rewriteValue386_OpRsh32Ux64_0(v *Value) bool { return false } func rewriteValue386_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux8 x y) // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRL, t) v0.AddArg(x) @@ -16565,13 +16669,15 @@ func rewriteValue386_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValue386_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x16 x y) // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARL) v.Type = t v.AddArg(x) @@ -16590,13 +16696,15 @@ func rewriteValue386_OpRsh32x16_0(v *Value) bool { } } func rewriteValue386_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x32 x y) // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARL) v.Type = t v.AddArg(x) @@ -16615,13 +16723,13 @@ func rewriteValue386_OpRsh32x32_0(v *Value) bool { } } func rewriteValue386_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SARLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16638,9 +16746,7 @@ func rewriteValue386_OpRsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (SARLconst x [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16656,13 +16762,15 @@ func rewriteValue386_OpRsh32x64_0(v *Value) bool { return false } func rewriteValue386_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x8 x y) // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARL) v.Type = t v.AddArg(x) @@ -16681,13 +16789,15 @@ func rewriteValue386_OpRsh32x8_0(v *Value) bool { } } func rewriteValue386_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] b := v.Block // match: (Rsh8Ux16 x y) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) v0.AddArg(x) @@ -16703,13 +16813,15 @@ func rewriteValue386_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValue386_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux32 x y) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) v0.AddArg(x) @@ -16725,13 +16837,13 @@ func rewriteValue386_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValue386_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh8Ux64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SHRBconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16748,8 +16860,6 @@ func rewriteValue386_OpRsh8Ux64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (Const8 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16764,13 +16874,15 @@ func rewriteValue386_OpRsh8Ux64_0(v *Value) bool { return false } func rewriteValue386_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux8 x y) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386ANDL) v0 := b.NewValue0(v.Pos, Op386SHRB, t) v0.AddArg(x) @@ -16786,13 +16898,15 @@ func rewriteValue386_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValue386_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x16 x y) // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARB) v.Type = t v.AddArg(x) @@ -16811,13 +16925,15 @@ func rewriteValue386_OpRsh8x16_0(v *Value) bool { } } func rewriteValue386_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x32 x y) // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARB) v.Type = t v.AddArg(x) @@ -16836,13 +16952,13 @@ func rewriteValue386_OpRsh8x32_0(v *Value) bool { } } func rewriteValue386_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SARBconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16859,9 +16975,7 @@ func rewriteValue386_OpRsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (SARBconst x [7]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16877,13 +16991,15 @@ func rewriteValue386_OpRsh8x64_0(v *Value) bool { return false } func rewriteValue386_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x8 x y) // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SARB) v.Type = t v.AddArg(x) @@ -16902,12 +17018,12 @@ func rewriteValue386_OpRsh8x8_0(v *Value) bool { } } func rewriteValue386_OpSelect0_0(v *Value) bool { + 
v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select0 (Mul32uover x y)) // result: (Select0 (MULLU x y)) for { - v_0 := v.Args[0] if v_0.Op != OpMul32uover { break } @@ -16924,12 +17040,12 @@ func rewriteValue386_OpSelect0_0(v *Value) bool { return false } func rewriteValue386_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select1 (Mul32uover x y)) // result: (SETO (Select1 (MULLU x y))) for { - v_0 := v.Args[0] if v_0.Op != OpMul32uover { break } @@ -16947,40 +17063,44 @@ func rewriteValue386_OpSelect1_0(v *Value) bool { return false } func rewriteValue386_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVWLSX x) for { - x := v.Args[0] + x := v_0 v.reset(Op386MOVWLSX) v.AddArg(x) return true } } func rewriteValue386_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBLSX x) for { - x := v.Args[0] + x := v_0 v.reset(Op386MOVBLSX) v.AddArg(x) return true } } func rewriteValue386_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBLSX x) for { - x := v.Args[0] + x := v_0 v.reset(Op386MOVBLSX) v.AddArg(x) return true } } func rewriteValue386_OpSignmask_0(v *Value) bool { + v_0 := v.Args[0] // match: (Signmask x) // result: (SARLconst x [31]) for { - x := v.Args[0] + x := v_0 v.reset(Op386SARLconst) v.AuxInt = 31 v.AddArg(x) @@ -16988,12 +17108,13 @@ func rewriteValue386_OpSignmask_0(v *Value) bool { } } func rewriteValue386_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SARLconst (NEGL x) [31]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(Op386SARLconst) v.AuxInt = 31 v0 := b.NewValue0(v.Pos, Op386NEGL, t) @@ -17003,22 +17124,24 @@ func rewriteValue386_OpSlicemask_0(v *Value) bool { } } func rewriteValue386_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (SQRTSD x) for { - x := v.Args[0] + x := v_0 v.reset(Op386SQRTSD) v.AddArg(x) return true } } func rewriteValue386_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(Op386CALLstatic) v.AuxInt = argwid v.Aux = target @@ -17027,14 +17150,17 @@ func rewriteValue386_OpStaticCall_0(v *Value) bool { } } func rewriteValue386_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) // result: (MOVSDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -17049,9 +17175,9 @@ func rewriteValue386_OpStore_0(v *Value) bool { // result: (MOVSSstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -17066,9 +17192,9 @@ func rewriteValue386_OpStore_0(v *Value) bool { // result: (MOVLstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4) { break } @@ -17083,9 +17209,9 @@ func rewriteValue386_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - 
mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -17100,9 +17226,9 @@ func rewriteValue386_OpStore_0(v *Value) bool { // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -17115,11 +17241,13 @@ func rewriteValue386_OpStore_0(v *Value) bool { return false } func rewriteValue386_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUBL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBL) v.AddArg(x) v.AddArg(y) @@ -17127,11 +17255,13 @@ func rewriteValue386_OpSub16_0(v *Value) bool { } } func rewriteValue386_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUBL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBL) v.AddArg(x) v.AddArg(y) @@ -17139,11 +17269,13 @@ func rewriteValue386_OpSub32_0(v *Value) bool { } } func rewriteValue386_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (SUBSS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBSS) v.AddArg(x) v.AddArg(y) @@ -17151,11 +17283,13 @@ func rewriteValue386_OpSub32F_0(v *Value) bool { } } func rewriteValue386_OpSub32carry_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32carry x y) // result: (SUBLcarry x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBLcarry) v.AddArg(x) v.AddArg(y) @@ -17163,12 +17297,15 @@ func rewriteValue386_OpSub32carry_0(v *Value) bool { } } func rewriteValue386_OpSub32withcarry_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32withcarry x y c) // result: (SBBL x y c) for { - c := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + c := v_2 v.reset(Op386SBBL) v.AddArg(x) v.AddArg(y) @@ -17177,11 +17314,13 @@ func rewriteValue386_OpSub32withcarry_0(v *Value) bool { } } func rewriteValue386_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (SUBSD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBSD) v.AddArg(x) v.AddArg(y) @@ -17189,11 +17328,13 @@ func rewriteValue386_OpSub64F_0(v *Value) bool { } } func rewriteValue386_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUBL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBL) v.AddArg(x) v.AddArg(y) @@ -17201,11 +17342,13 @@ func rewriteValue386_OpSub8_0(v *Value) bool { } } func rewriteValue386_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUBL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386SUBL) v.AddArg(x) v.AddArg(y) @@ -17213,10 +17356,11 @@ func rewriteValue386_OpSubPtr_0(v *Value) bool { } } func rewriteValue386_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17224,10 +17368,11 @@ func rewriteValue386_OpTrunc16to8_0(v *Value) bool { } } func rewriteValue386_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = 
x.Type v.AddArg(x) @@ -17235,10 +17380,11 @@ func rewriteValue386_OpTrunc32to16_0(v *Value) bool { } } func rewriteValue386_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17246,13 +17392,16 @@ func rewriteValue386_OpTrunc32to8_0(v *Value) bool { } } func rewriteValue386_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(Op386LoweredWB) v.Aux = fn v.AddArg(destptr) @@ -17262,11 +17411,13 @@ func rewriteValue386_OpWB_0(v *Value) bool { } } func rewriteValue386_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386XORL) v.AddArg(x) v.AddArg(y) @@ -17274,11 +17425,13 @@ func rewriteValue386_OpXor16_0(v *Value) bool { } } func rewriteValue386_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386XORL) v.AddArg(x) v.AddArg(y) @@ -17286,11 +17439,13 @@ func rewriteValue386_OpXor32_0(v *Value) bool { } } func rewriteValue386_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XORL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(Op386XORL) v.AddArg(x) v.AddArg(y) @@ -17298,6 +17453,8 @@ func rewriteValue386_OpXor8_0(v *Value) bool { } } func rewriteValue386_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [0] _ mem) @@ -17306,7 +17463,7 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -17318,8 +17475,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -17332,8 +17489,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVWstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -17346,8 +17503,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -17360,8 +17517,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) v.AddArg(destptr) @@ -17378,8 +17535,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -17396,8 +17553,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVWstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -17414,8 +17571,8 @@ func 
rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 3) v.AddArg(destptr) @@ -17431,8 +17588,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { // result: (Zero [s-s%4] (ADDLconst destptr [s%4]) (MOVLstoreconst [0] destptr mem)) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s%4 != 0 && s > 4) { break } @@ -17455,8 +17612,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -17470,6 +17627,8 @@ func rewriteValue386_OpZero_0(v *Value) bool { return false } func rewriteValue386_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -17479,8 +17638,8 @@ func rewriteValue386_OpZero_10(v *Value) bool { if v.AuxInt != 12 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 8) v.AddArg(destptr) @@ -17501,8 +17660,8 @@ func rewriteValue386_OpZero_10(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(Op386MOVLstoreconst) v.AuxInt = makeValAndOff(0, 12) v.AddArg(destptr) @@ -17526,8 +17685,8 @@ func rewriteValue386_OpZero_10(v *Value) bool { // result: (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) { break } @@ -17545,8 +17704,8 @@ func rewriteValue386_OpZero_10(v *Value) bool { // result: (REPSTOSL destptr (MOVLconst [s/4]) (MOVLconst [0]) mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !((s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0) { break } @@ -17564,42 +17723,46 @@ func rewriteValue386_OpZero_10(v *Value) bool { return false } func rewriteValue386_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVWLZX x) for { - x := v.Args[0] + x := v_0 v.reset(Op386MOVWLZX) v.AddArg(x) return true } } func rewriteValue386_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBLZX x) for { - x := v.Args[0] + x := v_0 v.reset(Op386MOVBLZX) v.AddArg(x) return true } } func rewriteValue386_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBLZX x) for { - x := v.Args[0] + x := v_0 v.reset(Op386MOVBLZX) v.AddArg(x) return true } } func rewriteValue386_OpZeromask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Zeromask x) // result: (XORLconst [-1] (SBBLcarrymask (CMPLconst x [1]))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(Op386XORLconst) v.AuxInt = -1 v0 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go index 2698e35834..9f1c71aa18 100644 --- a/src/cmd/compile/internal/ssa/rewrite386splitload.go +++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go @@ -21,6 +21,8 @@ func rewriteValue386splitload(v *Value) bool { return false } func rewriteValue386splitload_Op386CMPBconstload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPBconstload {sym} [vo] ptr mem) @@ -28,8 +30,8 @@ func rewriteValue386splitload_Op386CMPBconstload_0(v *Value) bool { for { vo := v.AuxInt sym := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(Op386CMPBconst) v.AuxInt = valOnly(vo) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) @@ -42,6 +44,9 @@ func rewriteValue386splitload_Op386CMPBconstload_0(v *Value) bool { } } func rewriteValue386splitload_Op386CMPBload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPBload {sym} [off] ptr x mem) @@ -49,9 +54,9 @@ func rewriteValue386splitload_Op386CMPBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - x := v.Args[1] + ptr := v_0 + x := v_1 + mem := v_2 v.reset(Op386CMPB) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) v0.AuxInt = off @@ -64,6 +69,8 @@ func rewriteValue386splitload_Op386CMPBload_0(v *Value) bool { } } func rewriteValue386splitload_Op386CMPLconstload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPLconstload {sym} [vo] ptr mem) @@ -71,8 +78,8 @@ func rewriteValue386splitload_Op386CMPLconstload_0(v *Value) bool { for { vo := v.AuxInt sym := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(Op386CMPLconst) v.AuxInt = valOnly(vo) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) @@ -85,6 +92,9 @@ func rewriteValue386splitload_Op386CMPLconstload_0(v *Value) bool { } } func rewriteValue386splitload_Op386CMPLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPLload {sym} [off] ptr x mem) @@ -92,9 +102,9 @@ func rewriteValue386splitload_Op386CMPLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - x := v.Args[1] + ptr := v_0 + x := v_1 + mem := v_2 v.reset(Op386CMPL) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) v0.AuxInt = off @@ -107,6 +117,8 @@ func rewriteValue386splitload_Op386CMPLload_0(v *Value) bool { } } func rewriteValue386splitload_Op386CMPWconstload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPWconstload {sym} [vo] ptr mem) @@ -114,8 +126,8 @@ func rewriteValue386splitload_Op386CMPWconstload_0(v *Value) bool { for { vo := v.AuxInt sym := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(Op386CMPWconst) v.AuxInt = valOnly(vo) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) @@ -128,6 +140,9 @@ func rewriteValue386splitload_Op386CMPWconstload_0(v *Value) bool { } } func rewriteValue386splitload_Op386CMPWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPWload {sym} [off] ptr x mem) @@ -135,9 +150,9 @@ func rewriteValue386splitload_Op386CMPWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - x := v.Args[1] + ptr := v_0 + x := v_1 + mem := v_2 v.reset(Op386CMPW) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) v0.AuxInt = off diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 388c8645be..3c2e57b1c3 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1275,18 +1275,20 @@ func 
rewriteValueAMD64(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADCQ x (MOVQconst [c]) carry) // cond: is32Bit(c) // result: (ADCQconst x [c] carry) for { - carry := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt + carry := v_2 if !(is32Bit(c)) { continue } @@ -1301,10 +1303,8 @@ func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool { // match: (ADCQ x y (FlagEQ)) // result: (ADDQcarry x y) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64FlagEQ { break } @@ -1316,13 +1316,13 @@ func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADCQconst x [c] (FlagEQ)) // result: (ADDQconstcarry x [c]) for { c := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64FlagEQ { break } @@ -1334,13 +1334,13 @@ func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDL x (MOVLconst [c])) // result: (ADDLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVLconst { continue } @@ -1356,15 +1356,12 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // cond: d==32-c // result: (ROLLconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRLconst { continue } @@ -1384,15 +1381,12 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // result: (ROLWconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRWconst { continue } @@ -1412,15 +1406,12 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // result: (ROLBconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRBconst { continue } @@ -1438,10 +1429,8 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (SHLLconst [3] y)) // result: (LEAL8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 { continue } @@ -1456,10 +1445,8 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (SHLLconst [2] y)) // result: (LEAL4 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64SHLLconst 
|| v_1.AuxInt != 2 { continue } @@ -1474,10 +1461,8 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (SHLLconst [1] y)) // result: (LEAL2 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 { continue } @@ -1492,10 +1477,8 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (ADDL y y)) // result: (LEAL2 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64ADDL { continue } @@ -1513,19 +1496,19 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (ADDL x y)) // result: (LEAL2 y x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64ADDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpAMD64LEAL2) v.AddArg(y) v.AddArg(x) @@ -1537,15 +1520,13 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL (ADDLconst [c] x) y) // result: (LEAL1 [c] x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpAMD64LEAL1) v.AuxInt = c v.AddArg(x) @@ -1557,14 +1538,14 @@ func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDL x (LEAL [c] {s} y)) // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAL1 [c] {s} x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64LEAL { continue } @@ -1586,10 +1567,8 @@ func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { // match: (ADDL x (NEGL y)) // result: (SUBL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64NEGL { continue } @@ -1605,10 +1584,9 @@ func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVLload { continue } @@ -1632,11 +1610,11 @@ func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDLconst [c] (ADDL x y)) // result: (LEAL1 [c] x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDL { break } @@ -1652,7 +1630,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (LEAL1 [c] x x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { break } @@ 
-1668,7 +1645,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (LEAL [c+d] {s} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } @@ -1689,7 +1665,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (LEAL1 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL1 { break } @@ -1712,7 +1687,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (LEAL2 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL2 { break } @@ -1735,7 +1709,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (LEAL4 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL4 { break } @@ -1758,7 +1731,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (LEAL8 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL8 { break } @@ -1781,7 +1753,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -1794,7 +1766,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (MOVLconst [int64(int32(c+d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLconst { break } @@ -1807,7 +1778,6 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { // result: (ADDLconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } @@ -1821,11 +1791,12 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (ADDLconst [off] x:(SP)) // result: (LEAL [off] x) for { off := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpSP { break } @@ -1837,19 +1808,20 @@ func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) // cond: ValAndOff(valoff1).canAdd(off2) // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -1866,14 +1838,13 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { break } @@ -1887,6 +1858,9 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -1895,14 +1869,13 @@ func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -1920,15 +1893,14 @@ func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := 
v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -1945,10 +1917,8 @@ func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -1967,20 +1937,22 @@ func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem) // cond: is32Bit(off1+off2) // result: (ADDLmodify [off1+off2] {sym} base val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -1998,15 +1970,14 @@ func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -2021,14 +1992,14 @@ func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDQ x (MOVQconst [c])) // cond: is32Bit(c) // result: (ADDQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } @@ -2047,15 +2018,12 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // cond: d==64-c // result: (ROLQconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLQconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRQconst { continue } @@ -2073,10 +2041,8 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (SHLQconst [3] y)) // result: (LEAQ8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { continue } @@ -2091,10 +2057,8 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (SHLQconst [2] y)) // result: (LEAQ4 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { continue } @@ -2109,10 +2073,8 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (SHLQconst [1] y)) // result: (LEAQ2 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { continue } @@ -2127,10 +2089,8 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: 
(ADDQ x (ADDQ y y)) // result: (LEAQ2 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64ADDQ { continue } @@ -2148,19 +2108,19 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (ADDQ x y)) // result: (LEAQ2 y x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64ADDQ { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpAMD64LEAQ2) v.AddArg(y) v.AddArg(x) @@ -2172,15 +2132,13 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ (ADDQconst [c] x) y) // result: (LEAQ1 [c] x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } c := v_0.AuxInt x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpAMD64LEAQ1) v.AuxInt = c v.AddArg(x) @@ -2193,10 +2151,8 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAQ1 [c] {s} x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64LEAQ { continue } @@ -2218,10 +2174,8 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (NEGQ y)) // result: (SUBQ x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64NEGQ { continue } @@ -2236,14 +2190,15 @@ func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDQload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVQload { continue } @@ -2267,14 +2222,14 @@ func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDQcarry x (MOVQconst [c])) // cond: is32Bit(c) // result: (ADDQconstcarry x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } @@ -2292,11 +2247,11 @@ func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDQconst [c] (ADDQ x y)) // result: (LEAQ1 [c] x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } @@ -2312,7 +2267,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (LEAQ1 [c] x x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64SHLQconst || 
v_0.AuxInt != 1 { break } @@ -2328,7 +2282,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (LEAQ [c+d] {s} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } @@ -2349,7 +2302,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (LEAQ1 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -2372,7 +2324,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (LEAQ2 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ2 { break } @@ -2395,7 +2346,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (LEAQ4 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ4 { break } @@ -2418,7 +2368,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (LEAQ8 [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -2442,7 +2391,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -2452,7 +2401,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (MOVQconst [c+d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -2466,7 +2414,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // result: (ADDQconst [c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } @@ -2483,11 +2430,12 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (ADDQconst [off] x:(SP)) // result: (LEAQ [off] x) for { off := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpSP { break } @@ -2499,19 +2447,20 @@ func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) // cond: ValAndOff(valoff1).canAdd(off2) // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) for { valoff1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2)) { break } @@ -2528,14 +2477,13 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool { for { valoff1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { break } @@ -2549,6 +2497,9 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -2557,14 +2508,13 @@ func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -2582,15 +2532,14 @@ func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - 
mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -2607,10 +2556,8 @@ func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -2629,20 +2576,22 @@ func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem) // cond: is32Bit(off1+off2) // result: (ADDQmodify [off1+off2] {sym} base val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -2660,15 +2609,14 @@ func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -2683,14 +2631,15 @@ func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVSDload { continue } @@ -2714,6 +2663,9 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -2722,14 +2674,13 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -2747,15 +2698,14 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -2772,10 +2722,8 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -2794,14 +2742,15 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDSS x l:(MOVSSload [off] 
{sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVSSload { continue } @@ -2825,6 +2774,9 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -2833,14 +2785,13 @@ func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -2858,15 +2809,14 @@ func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -2883,10 +2833,8 @@ func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -2905,12 +2853,12 @@ func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x) // result: (BTRL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64NOTL { continue } @@ -2923,7 +2871,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpAMD64BTRL) v.AddArg(x) v.AddArg(y) @@ -2935,14 +2883,12 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 // result: (BTRLconst [log2uint32(^c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { continue } @@ -2956,10 +2902,8 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // match: (ANDL x (MOVLconst [c])) // result: (ANDLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVLconst { continue } @@ -2974,8 +2918,8 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // match: (ANDL x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -2987,10 +2931,9 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := 
v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVLload { continue } @@ -3014,12 +2957,13 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDLconst [c] x) // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 // result: (BTRLconst [log2uint32(^c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) { break } @@ -3032,7 +2976,6 @@ func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { // result: (ANDLconst [c & d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -3047,7 +2990,6 @@ func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { // result: (ANDLconst [c &^ (1<= 128 // result: (BTRQconst [log2(^c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { continue } @@ -3358,10 +3297,8 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { // cond: is32Bit(c) // result: (ANDQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } @@ -3379,8 +3316,8 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { // match: (ANDQ x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -3392,10 +3329,9 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDQload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVQload { continue } @@ -3419,12 +3355,13 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDQconst [c] x) // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 // result: (BTRQconst [log2(^c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) { break } @@ -3437,7 +3374,6 @@ func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { // result: (ANDQconst [c & d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDQconst { break } @@ -3452,7 +3388,6 @@ func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { // result: (ANDQconst [c &^ (1< [1<<8] (MOVBQZX x))) // result: (BSFQ (ORQconst [1<<8] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ORQconst { break } @@ -3745,7 +3679,6 @@ func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { // match: (BSFQ (ORQconst [1<<16] (MOVWQZX x))) // result: (BSFQ (ORQconst [1<<16] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ORQconst { break } @@ -3768,11 +3701,11 @@ func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (BTCLconst [c] (XORLconst [d] x)) // result: (XORLconst [d ^ 1< [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVBload { break } @@ -10604,7 +10156,7 @@ func 
rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVWload { break } @@ -10629,7 +10181,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLload { break } @@ -10654,7 +10206,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVQload { break } @@ -10679,7 +10231,6 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { // cond: c & 0x80 == 0 // result: (ANDLconst [c & 0x7f] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -10696,7 +10247,6 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { // match: (MOVBQSX (MOVBQSX x)) // result: (MOVBQSX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVBQSX { break } @@ -10708,15 +10258,15 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVBQSX x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVBstore { break } @@ -10738,14 +10288,13 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10759,12 +10308,13 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVBload { break } @@ -10789,7 +10339,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVWload { break } @@ -10814,7 +10364,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLload { break } @@ -10839,7 +10389,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVQload { break } @@ -10864,7 +10414,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { // cond: zeroUpper56Bits(x,3) // result: x for { - x := v.Args[0] + x := v_0 if !(zeroUpper56Bits(x, 3)) { break } @@ -10877,7 +10427,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVBloadidx1 { break } @@ -10903,7 +10453,6 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v 
*Value) bool { // match: (MOVBQZX (ANDLconst [c] x)) // result: (ANDLconst [c & 0xff] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -10917,7 +10466,6 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { // match: (MOVBQZX (MOVBQZX x)) // result: (MOVBQZX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVBQZX { break } @@ -10929,19 +10477,20 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBatomicload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBatomicload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -10958,14 +10507,13 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10979,15 +10527,15 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVBQZX x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVBstore { break } @@ -11009,13 +10557,12 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -11032,14 +10579,13 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -11056,8 +10602,6 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -11065,6 +10609,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -11082,15 +10627,16 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -11110,14 +10656,13 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ 
-11134,13 +10679,12 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -11157,8 +10701,6 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -11169,21 +10711,23 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -11203,15 +10747,14 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -11231,14 +10774,13 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + c)) { continue } @@ -11254,19 +10796,22 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) // cond: y.Uses == 1 // result: (SETLstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETL { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11284,13 +10829,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETLE { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11308,13 +10853,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETG { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11332,13 +10877,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETGE { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11356,13 +10901,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + 
ptr := v_0 + y := v_1 if y.Op != OpAMD64SETEQ { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11380,13 +10925,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETNE { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11404,13 +10949,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETB { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11428,13 +10973,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETBE { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11452,13 +10997,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETA { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11476,13 +11021,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SETAE { break } x := y.Args[0] + mem := v_2 if !(y.Uses == 1) { break } @@ -11497,19 +11042,21 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVBQSX { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -11523,13 +11070,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVBQZX { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -11544,14 +11090,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -11569,13 +11114,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -11592,13 +11136,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -11615,15 +11158,14 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux 
base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -11641,8 +11183,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -11650,7 +11190,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -11669,16 +11210,17 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -11699,10 +11241,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { break } @@ -11731,10 +11272,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x2 := v.Args[2] + p := v_0 + w := v_1 + x2 := v_2 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-1 || x2.Aux != s { break } @@ -11783,6 +11323,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) @@ -11791,10 +11334,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x6 := v.Args[2] + p := v_0 + w := v_1 + x6 := v_2 if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i-1 || x6.Aux != s { break } @@ -11894,14 +11436,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11923,14 +11463,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11952,14 +11490,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := 
v.Args[2] + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11981,10 +11517,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { break } @@ -12010,10 +11545,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { break } @@ -12039,10 +11573,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { break } @@ -12068,15 +11601,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -12102,15 +11633,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRQconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -12136,9 +11665,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - x1 := v.Args[1] + p := v_0 + x1 := v_1 if x1.Op != OpAMD64MOVBload { break } @@ -12146,7 +11674,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { s2 := x1.Aux mem := x1.Args[1] p2 := x1.Args[0] - mem2 := v.Args[2] + mem2 := v_2 if mem2.Op != OpAMD64MOVBstore || mem2.AuxInt != i-1 || mem2.Aux != s { break } @@ -12178,21 +11706,23 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -12210,14 +11740,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -12232,19 +11761,20 @@ func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) // cond: ValAndOff(sc).canAdd(off) // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - 
v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -12261,14 +11791,13 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -12285,8 +11814,6 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -12294,6 +11821,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -12310,13 +11838,12 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -12331,9 +11858,8 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVBstoreconst { break } @@ -12358,9 +11884,8 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { a := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVBstoreconst { break } @@ -12385,14 +11910,13 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -12409,13 +11933,12 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -12429,21 +11952,23 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) // cond: ValAndOff(x).canAdd(c) // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -12463,15 +11988,14 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -12491,11 +12015,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { for { c := 
v.AuxInt s := v.Aux - _ = v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - i := v.Args[1^_i0] - x := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + i := v_1 + x := v_2 if x.Op != OpAMD64MOVBstoreconstidx1 { continue } @@ -12504,8 +12027,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { continue } mem := x.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || i != x.Args[1^_i1] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || i != x_1 || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { continue } v.reset(OpAMD64MOVWstoreconstidx1) @@ -12522,6 +12047,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) @@ -12529,16 +12058,15 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -12559,16 +12087,15 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -12589,18 +12116,19 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x0 := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x0 := v_3 if x0.Op != OpAMD64MOVBstoreidx1 || x0.AuxInt != i-1 || x0.Aux != s { continue } mem := x0.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x0.Args[_i1] || idx != x0.Args[1^_i1] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 { continue } x0_2 := x0.Args[2] @@ -12628,18 +12156,19 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x2 := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x2 := v_3 if x2.Op != OpAMD64MOVBstoreidx1 || x2.AuxInt != i-1 || x2.Aux != s { continue } _ = x2.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x2.Args[_i1] || idx != x2.Args[1^_i1] { + x2_0 := x2.Args[0] + x2_1 := x2.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x2_0, x2_1 = _i1+1, x2_1, x2_0 { + if p != x2_0 || idx != x2_1 { continue } x2_2 := x2.Args[2] @@ -12651,8 +12180,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } _ 
= x1.Args[3] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 { continue } x1_2 := x1.Args[2] @@ -12664,8 +12195,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } mem := x0.Args[3] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 { continue } x0_2 := x0.Args[2] @@ -12694,18 +12227,19 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x6 := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x6 := v_3 if x6.Op != OpAMD64MOVBstoreidx1 || x6.AuxInt != i-1 || x6.Aux != s { continue } _ = x6.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x6.Args[_i1] || idx != x6.Args[1^_i1] { + x6_0 := x6.Args[0] + x6_1 := x6.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x6_0, x6_1 = _i1+1, x6_1, x6_0 { + if p != x6_0 || idx != x6_1 { continue } x6_2 := x6.Args[2] @@ -12717,8 +12251,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } _ = x5.Args[3] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x5.Args[_i2] || idx != x5.Args[1^_i2] { + x5_0 := x5.Args[0] + x5_1 := x5.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x5_0, x5_1 = _i2+1, x5_1, x5_0 { + if p != x5_0 || idx != x5_1 { continue } x5_2 := x5.Args[2] @@ -12730,8 +12266,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } _ = x4.Args[3] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x4.Args[_i3] || idx != x4.Args[1^_i3] { + x4_0 := x4.Args[0] + x4_1 := x4.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x4_0, x4_1 = _i3+1, x4_1, x4_0 { + if p != x4_0 || idx != x4_1 { continue } x4_2 := x4.Args[2] @@ -12743,8 +12281,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } _ = x3.Args[3] - for _i4 := 0; _i4 <= 1; _i4++ { - if p != x3.Args[_i4] || idx != x3.Args[1^_i4] { + x3_0 := x3.Args[0] + x3_1 := x3.Args[1] + for _i4 := 0; _i4 <= 1; _i4, x3_0, x3_1 = _i4+1, x3_1, x3_0 { + if p != x3_0 || idx != x3_1 { continue } x3_2 := x3.Args[2] @@ -12756,8 +12296,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } _ = x2.Args[3] - for _i5 := 0; _i5 <= 1; _i5++ { - if p != x2.Args[_i5] || idx != x2.Args[1^_i5] { + x2_0 := x2.Args[0] + x2_1 := x2.Args[1] + for _i5 := 0; _i5 <= 1; _i5, x2_0, x2_1 = _i5+1, x2_1, x2_0 { + if p != x2_0 || idx != x2_1 { continue } x2_2 := x2.Args[2] @@ -12769,8 +12311,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } _ = x1.Args[3] - for _i6 := 0; _i6 <= 1; _i6++ { - if p != x1.Args[_i6] || idx != x1.Args[1^_i6] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i6 := 0; _i6 <= 1; _i6, x1_0, x1_1 = _i6+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 { continue } x1_2 := x1.Args[2] @@ -12782,8 +12326,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { continue } mem := x0.Args[3] - for _i7 := 0; _i7 <= 1; _i7++ { - if p != x0.Args[_i7] || idx != x0.Args[1^_i7] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i7 := 0; _i7 <= 1; _i7, x0_0, x0_1 = _i7+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 { continue } x0_2 := x0.Args[2] @@ -12816,22 
+12362,22 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRWconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpAMD64MOVWstoreidx1) @@ -12852,22 +12398,22 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRLconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpAMD64MOVWstoreidx1) @@ -12888,22 +12434,22 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpAMD64MOVWstoreidx1) @@ -12924,23 +12470,23 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRLconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -12965,23 +12511,23 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] 
- v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -13003,21 +12549,24 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreidx1 [i] {s} p (MOVQconst [c]) w mem) // cond: is32Bit(i+c) // result: (MOVBstore [i+c] {s} p w mem) for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + c)) { continue } @@ -13034,12 +12583,13 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLload { break } @@ -13064,7 +12614,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVQload { break } @@ -13089,7 +12639,6 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { // cond: c & 0x80000000 == 0 // result: (ANDLconst [c & 0x7fffffff] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -13106,7 +12655,6 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { // match: (MOVLQSX (MOVLQSX x)) // result: (MOVLQSX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLQSX { break } @@ -13118,7 +12666,6 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { // match: (MOVLQSX (MOVWQSX x)) // result: (MOVWQSX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVWQSX { break } @@ -13130,7 +12677,6 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { // match: (MOVLQSX (MOVBQSX x)) // result: (MOVBQSX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVBQSX { break } @@ -13142,15 +12688,15 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVLQSX x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLstore { break } @@ -13172,14 +12718,13 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13193,12 +12738,13 @@ func 
rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLload { break } @@ -13223,7 +12769,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVQload { break } @@ -13248,7 +12794,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // cond: zeroUpper32Bits(x,3) // result: x for { - x := v.Args[0] + x := v_0 if !(zeroUpper32Bits(x, 3)) { break } @@ -13261,7 +12807,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLloadidx1 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLloadidx1 { break } @@ -13288,7 +12834,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLloadidx4 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLloadidx4 { break } @@ -13314,7 +12860,6 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // match: (MOVLQZX (ANDLconst [c] x)) // result: (ANDLconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -13328,7 +12873,6 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // match: (MOVLQZX (MOVLQZX x)) // result: (MOVLQZX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLQZX { break } @@ -13340,7 +12884,6 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // match: (MOVLQZX (MOVWQZX x)) // result: (MOVWQZX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVWQZX { break } @@ -13352,7 +12895,6 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { // match: (MOVLQZX (MOVBQZX x)) // result: (MOVBQZX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVBQZX { break } @@ -13364,19 +12906,20 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVLatomicload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -13393,14 +12936,13 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13414,13 +12956,13 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVLf2i (Arg [off] {sym})) // cond: t.Size() == u.Size() // result: @b.Func.Entry (Arg [off] {sym}) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpArg { break } @@ -13441,13 +12983,13 @@ func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // 
match: (MOVLi2f (Arg [off] {sym})) // cond: t.Size() == u.Size() // result: @b.Func.Entry (Arg [off] {sym}) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpArg { break } @@ -13468,15 +13010,15 @@ func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVLQZX x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLstore { break } @@ -13498,13 +13040,12 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -13521,14 +13062,13 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13545,8 +13085,6 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -13554,6 +13092,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13571,8 +13110,6 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ4 { break } @@ -13580,6 +13117,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13597,8 +13135,6 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -13606,6 +13142,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13623,15 +13160,16 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -13651,14 +13189,13 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -13675,13 +13212,12 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if 
!(is32Bit(off1 + off2)) { break } @@ -13697,9 +13233,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -13715,6 +13249,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVLload [off] {sym} (SB) _) @@ -13723,8 +13258,6 @@ func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -13735,19 +13268,21 @@ func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) // result: (MOVLloadidx4 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c v.Aux = sym @@ -13763,14 +13298,13 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVLloadidx8) v.AuxInt = c v.Aux = sym @@ -13787,15 +13321,14 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -13815,15 +13348,14 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -13843,14 +13375,13 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + c)) { continue } @@ -13866,20 +13397,22 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { 
break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -13897,14 +13430,13 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + 4*d)) { break } @@ -13922,13 +13454,12 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + 4*c)) { break } @@ -13942,20 +13473,22 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVLloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -13973,14 +13506,13 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + 8*d)) { break } @@ -13998,13 +13530,12 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + 8*c)) { break } @@ -14018,18 +13549,20 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) // result: (MOVLstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLQSX { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym @@ -14043,13 +13576,12 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLQZX { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym @@ -14064,14 +13596,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -14089,13 +13620,12 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -14112,13 +13642,12 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if 
v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -14135,15 +13664,14 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -14161,8 +13689,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -14170,7 +13696,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -14189,8 +13716,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ4 { break } @@ -14198,7 +13723,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -14217,8 +13743,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -14226,7 +13750,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -14245,16 +13770,17 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -14272,6 +13798,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) @@ -14280,14 +13809,12 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -14309,15 +13836,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRQconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -14343,9 +13868,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - x1 := v.Args[1] + p := v_0 + x1 := v_1 if x1.Op != OpAMD64MOVLload { break } 
@@ -14353,7 +13877,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { s2 := x1.Aux mem := x1.Args[1] p2 := x1.Args[0] - mem2 := v.Args[2] + mem2 := v_2 if mem2.Op != OpAMD64MOVLstore || mem2.AuxInt != i-4 || mem2.Aux != s { break } @@ -14388,15 +13912,14 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -14414,14 +13937,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -14439,15 +13961,14 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ADDLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64ADDLmodify) @@ -14464,15 +13985,14 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ANDLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64ANDLmodify) @@ -14489,15 +14009,14 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ORLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64ORLmodify) @@ -14514,15 +14033,14 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64XORLload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64XORLmodify) @@ -14539,24 +14057,25 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ADDL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - 
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64ADDLmodify) @@ -14572,15 +14091,17 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // result: (SUBLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SUBL { break } @@ -14589,8 +14110,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64SUBLmodify) @@ -14607,24 +14128,25 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ANDL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64ANDLmodify) @@ -14643,24 +14165,25 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ORL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64ORLmodify) @@ -14679,24 +14202,25 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64XORL { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && 
clobber(y) && clobber(l)) { continue } v.reset(OpAMD64XORLmodify) @@ -14715,9 +14239,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64BTCL { break } @@ -14726,8 +14249,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64BTCLmodify) @@ -14744,9 +14267,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64BTRL { break } @@ -14755,8 +14277,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64BTRLmodify) @@ -14773,9 +14295,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64BTSL { break } @@ -14784,8 +14305,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64BTSLmodify) @@ -14802,9 +14323,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64ADDLconst { break } @@ -14813,9 +14333,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ADDLconstmodify) @@ -14831,9 +14351,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64ANDLconst { break } @@ -14842,9 +14361,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ANDLconstmodify) @@ 
-14860,9 +14379,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64ORLconst { break } @@ -14871,9 +14389,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ORLconstmodify) @@ -14886,15 +14404,17 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64XORLconst { break } @@ -14903,9 +14423,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64XORLconstmodify) @@ -14921,9 +14441,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64BTCLconst { break } @@ -14932,9 +14451,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64BTCLconstmodify) @@ -14950,9 +14469,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64BTRLconst { break } @@ -14961,9 +14479,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64BTRLconstmodify) @@ -14979,9 +14497,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] 
- a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64BTSLconst { break } @@ -14990,9 +14507,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64BTSLconstmodify) @@ -15007,13 +14524,12 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLf2i { break } val := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVSSstore) v.AuxInt = off v.Aux = sym @@ -15025,6 +14541,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) @@ -15033,13 +14551,12 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -15056,14 +14573,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -15080,8 +14596,6 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -15089,6 +14603,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -15106,8 +14621,6 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ4 { break } @@ -15115,6 +14628,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -15131,13 +14645,12 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -15152,9 +14665,8 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVLstoreconst { break } @@ -15182,9 +14694,8 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { a := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVLstoreconst { break } @@ -15212,14 +14723,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := 
v.Args[0] if v_0.Op != OpAMD64LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -15236,13 +14746,12 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -15256,6 +14765,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) @@ -15263,14 +14775,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym @@ -15287,15 +14798,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -15315,15 +14825,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -15343,11 +14852,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - i := v.Args[1^_i0] - x := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + i := v_1 + x := v_2 if x.Op != OpAMD64MOVLstoreconstidx1 { continue } @@ -15356,8 +14864,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { continue } mem := x.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || i != x.Args[1^_i1] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || i != x_1 || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { continue } v.reset(OpAMD64MOVQstoreidx1) @@ -15377,6 +14887,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) @@ -15385,14 +14898,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != 
OpAMD64ADDQconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { break } @@ -15410,14 +14922,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(4 * c)) { break } @@ -15435,10 +14946,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] + p := v_0 + i := v_1 + x := v_2 if x.Op != OpAMD64MOVLstoreconstidx4 { break } @@ -15467,20 +14977,23 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { continue } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c v.Aux = sym @@ -15497,15 +15010,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { continue } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpAMD64MOVLstoreidx8) v.AuxInt = c v.Aux = sym @@ -15523,16 +15035,15 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -15553,16 +15064,15 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -15583,22 +15093,22 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 32 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVLstoreidx1 || x.AuxInt != i-4 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := 
x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpAMD64MOVQstoreidx1) @@ -15619,23 +15129,23 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVLstoreidx1 || x.AuxInt != i-4 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -15660,15 +15170,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + c)) { continue } @@ -15685,6 +15194,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) @@ -15692,15 +15205,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -15719,15 +15231,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + 4*d)) { break } @@ -15746,15 +15257,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 32 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVLstoreidx4 || x.AuxInt != i-4 || x.Aux != s { break } @@ -15780,16 +15289,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst { break } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVLstoreidx4 || x.AuxInt != i-4 || x.Aux != s { break } @@ -15819,14 +15326,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + 4*c)) { break } @@ -15841,21 +15347,24 @@ func 
rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) // result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -15874,15 +15383,14 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + 8*d)) { break } @@ -15901,14 +15409,13 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + 8*c)) { break } @@ -15923,19 +15430,20 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVOload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -15952,14 +15460,13 @@ func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -15973,20 +15480,22 @@ func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVOstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -16004,15 +15513,14 @@ func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16027,19 +15535,20 @@ func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVQatomicload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } 
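// Editor's note (illustrative sketch, not part of this patch or of the generated
// rewriteAMD64.go): each regenerated rewriteValue function now begins by copying
// v.Args into locals, highest index first, and the rule bodies below read v_0,
// v_1, ... instead of indexing v.Args inside every rule. The names `value` and
// `rewriteSketch` are hypothetical stand-ins, just enough to show that prologue.
package main

import "fmt"

// value is a minimal stand-in for *ssa.Value.
type value struct{ Args []*value }

func rewriteSketch(v *value) bool {
	v_2 := v.Args[2] // loading the highest index first lets a single bounds
	v_1 := v.Args[1] // check cover the remaining loads
	v_0 := v.Args[0]
	fmt.Println(v_0, v_1, v_2) // rules would match against v_0, v_1, v_2 from here on
	return false
}

func main() {
	a := &value{}
	rewriteSketch(&value{Args: []*value{a, a, a}})
}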
off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -16056,14 +15565,13 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16077,13 +15585,13 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVQf2i (Arg [off] {sym})) // cond: t.Size() == u.Size() // result: @b.Func.Entry (Arg [off] {sym}) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpArg { break } @@ -16104,13 +15612,13 @@ func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVQi2f (Arg [off] {sym})) // cond: t.Size() == u.Size() // result: @b.Func.Entry (Arg [off] {sym}) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpArg { break } @@ -16131,6 +15639,8 @@ func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) @@ -16139,9 +15649,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQstore { break } @@ -16164,13 +15672,12 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -16187,14 +15694,13 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16211,8 +15717,6 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -16220,6 +15724,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16237,8 +15742,6 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -16246,6 +15749,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16263,15 +15767,16 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op 
!= OpSB) { continue } @@ -16291,14 +15796,13 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -16315,13 +15819,12 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -16337,9 +15840,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -16358,8 +15859,6 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -16370,19 +15869,21 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) // result: (MOVQloadidx8 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c v.Aux = sym @@ -16399,15 +15900,14 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -16427,15 +15927,14 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -16455,14 +15954,13 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + c)) { continue } @@ -16478,20 +15976,22 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if 
!(is32Bit(c + d)) { break } @@ -16509,14 +16009,13 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + 8*d)) { break } @@ -16534,13 +16033,12 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + 8*c)) { break } @@ -16554,20 +16052,22 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVQstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -16585,13 +16085,12 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(validValAndOff(c, off)) { break } @@ -16608,15 +16107,14 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16634,8 +16132,6 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -16643,7 +16139,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16662,8 +16159,6 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -16671,7 +16166,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -16690,16 +16186,17 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -16720,15 +16217,14 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 
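// Editor's note (illustrative sketch, not part of this patch): commutative
// matches in the regenerated code advance the counter and swap the pre-loaded
// argument locals in the for-loop post statement, rather than re-indexing
// v.Args with _i0 and 1^_i0 each iteration; a nested commutative operand copies
// its own Args into fresh locals (the x_0/x_1, y_0/y_1 pairs above) and swaps
// those the same way. All names below are stand-ins for demonstration only.
package main

import "fmt"

func main() {
	v_0, v_1 := "ptr", "x" // stand-ins for the pre-loaded v.Args[0], v.Args[1]
	for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
		// Only the top-level value's args are pre-declared, so a commutative
		// child's args are copied into locals just before the inner loop.
		x_0, x_1 := "p", "idx"
		for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
			fmt.Println(v_0, v_1, x_0, x_1) // visits all four argument orders
		}
	}
}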
+ mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -16746,14 +16242,13 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -16771,15 +16266,14 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ADDQload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64ADDQmodify) @@ -16796,15 +16290,14 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ANDQload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64ANDQmodify) @@ -16818,21 +16311,23 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) // cond: y.Uses==1 && clobber(y) // result: (ORQmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ORQload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64ORQmodify) @@ -16849,15 +16344,14 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64XORQload || y.AuxInt != off || y.Aux != sym { break } - _ = y.Args[2] + mem := y.Args[2] x := y.Args[0] - if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { break } v.reset(OpAMD64XORQmodify) @@ -16874,24 +16368,25 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ADDQ { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64ADDQmodify) @@ -16910,9 +16405,8 @@ func 
rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64SUBQ { break } @@ -16921,8 +16415,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64SUBQmodify) @@ -16939,24 +16433,25 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ANDQ { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64ANDQmodify) @@ -16975,24 +16470,25 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64ORQ { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64ORQmodify) @@ -17011,24 +16507,25 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64XORQ { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { continue } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] { + mem := l.Args[1] + if ptr != l.Args[0] { continue } - x := y.Args[1^_i0] - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { continue } v.reset(OpAMD64XORQmodify) @@ -17047,9 +16544,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64BTCQ { break } @@ -17058,8 +16554,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && 
clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64BTCQmodify) @@ -17076,9 +16572,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64BTRQ { break } @@ -17087,8 +16582,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64BTRQmodify) @@ -17105,9 +16600,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - y := v.Args[1] + ptr := v_0 + y := v_1 if y.Op != OpAMD64BTSQ { break } @@ -17116,8 +16610,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] - if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } v.reset(OpAMD64BTSQmodify) @@ -17131,15 +16625,17 @@ func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64ADDQconst { break } @@ -17148,9 +16644,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ADDQconstmodify) @@ -17166,9 +16662,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64ANDQconst { break } @@ -17177,9 +16672,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ANDQconstmodify) @@ -17195,9 +16690,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := 
v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64ORQconst { break } @@ -17206,9 +16700,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ORQconstmodify) @@ -17224,9 +16718,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64XORQconst { break } @@ -17235,9 +16728,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64XORQconstmodify) @@ -17253,9 +16746,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64BTCQconst { break } @@ -17264,9 +16756,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64BTCQconstmodify) @@ -17282,9 +16774,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64BTRQconst { break } @@ -17293,9 +16784,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64BTRQconstmodify) @@ -17311,9 +16802,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - a := v.Args[1] + ptr := v_0 + a := v_1 if a.Op != OpAMD64BTSQconst { break } @@ -17322,9 +16812,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { break } - _ = l.Args[1] + mem := l.Args[1] ptr2 := l.Args[0] - if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) 
{ break } v.reset(OpAMD64BTSQconstmodify) @@ -17339,13 +16829,12 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQf2i { break } val := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVSDstore) v.AuxInt = off v.Aux = sym @@ -17357,6 +16846,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) @@ -17365,13 +16856,12 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -17388,14 +16878,13 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -17412,8 +16901,6 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -17421,6 +16908,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -17438,8 +16926,6 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -17447,6 +16933,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -17463,13 +16950,12 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -17484,9 +16970,8 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVQstoreconst { break } @@ -17514,14 +16999,13 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -17538,13 +17022,12 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -17558,19 +17041,21 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) // result: 
(MOVQstoreconstidx8 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = c v.Aux = sym @@ -17587,15 +17072,14 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -17615,15 +17099,14 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -17640,20 +17123,22 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { break } @@ -17671,14 +17156,13 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(8 * c)) { break } @@ -17693,20 +17177,23 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { continue } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c v.Aux = sym @@ -17724,16 +17211,15 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -17754,16 +17240,15 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem 
:= v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -17784,15 +17269,14 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + c)) { continue } @@ -17809,21 +17293,24 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -17842,15 +17329,14 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + 8*d)) { break } @@ -17869,14 +17355,13 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + 8*c)) { break } @@ -17891,19 +17376,20 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVSDload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -17920,14 +17406,13 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -17944,8 +17429,6 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -17953,6 +17436,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -17970,8 +17454,6 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -17979,6 +17461,7 @@ func 
rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -17996,15 +17479,16 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -18023,9 +17507,7 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -18041,18 +17523,20 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c v.Aux = sym @@ -18067,14 +17551,13 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -18092,14 +17575,13 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + d)) { break } @@ -18117,13 +17599,12 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + c)) { break } @@ -18137,20 +17618,22 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -18168,14 +17651,13 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + 8*d)) { break } @@ -18193,13 +17675,12 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := 
v_1.AuxInt + mem := v_2 if !(is32Bit(i + 8*c)) { break } @@ -18213,20 +17694,22 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVSDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -18244,15 +17727,14 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18270,8 +17752,6 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -18279,7 +17759,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18298,8 +17779,6 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ8 { break } @@ -18307,7 +17786,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18326,16 +17806,17 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -18355,13 +17836,12 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQi2f { break } val := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVQstore) v.AuxInt = off v.Aux = sym @@ -18373,19 +17853,22 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { break } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c v.Aux = sym @@ -18401,15 +17884,14 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != 
OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -18428,15 +17910,14 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -18455,14 +17936,13 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + c)) { break } @@ -18477,21 +17957,24 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -18510,15 +17993,14 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + 8*d)) { break } @@ -18537,14 +18019,13 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + 8*c)) { break } @@ -18559,19 +18040,20 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVSSload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -18588,14 +18070,13 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18612,8 +18093,6 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -18621,6 +18100,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18638,8 +18118,6 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - 
v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ4 { break } @@ -18647,6 +18125,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18664,15 +18143,16 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -18691,9 +18171,7 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -18709,18 +18187,20 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c v.Aux = sym @@ -18735,14 +18215,13 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -18760,14 +18239,13 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + d)) { break } @@ -18785,13 +18263,12 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + c)) { break } @@ -18805,20 +18282,22 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -18836,14 +18315,13 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + 4*d)) { break } @@ -18861,13 +18339,12 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - 
v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + 4*c)) { break } @@ -18881,20 +18358,22 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVSSstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -18912,15 +18391,14 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18938,8 +18416,6 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -18947,7 +18423,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18966,8 +18443,6 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ4 { break } @@ -18975,7 +18450,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -18994,16 +18470,17 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -19023,13 +18500,12 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLi2f { break } val := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = off v.Aux = sym @@ -19041,19 +18517,22 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c v.Aux = sym @@ -19069,15 +18548,14 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { for { c := 
v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -19096,15 +18574,14 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -19123,14 +18600,13 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + c)) { break } @@ -19145,21 +18621,24 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -19178,15 +18657,14 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + 4*d)) { break } @@ -19205,14 +18683,13 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + 4*c)) { break } @@ -19227,12 +18704,13 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVWload { break } @@ -19257,7 +18735,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLload { break } @@ -19282,7 +18760,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVQload { break } @@ -19307,7 +18785,6 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { // cond: c & 0x8000 == 0 // result: (ANDLconst [c & 0x7fff] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -19324,7 +18801,6 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { // match: (MOVWQSX (MOVWQSX x)) // result: (MOVWQSX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVWQSX { break } @@ -19336,7 +18812,6 @@ func 
rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { // match: (MOVWQSX (MOVBQSX x)) // result: (MOVBQSX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVBQSX { break } @@ -19348,15 +18823,15 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVWQSX x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVWstore { break } @@ -19378,14 +18853,13 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -19399,12 +18873,13 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVWload { break } @@ -19429,7 +18904,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVLload { break } @@ -19454,7 +18929,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVQload { break } @@ -19479,7 +18954,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // cond: zeroUpper48Bits(x,3) // result: x for { - x := v.Args[0] + x := v_0 if !(zeroUpper48Bits(x, 3)) { break } @@ -19492,7 +18967,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVWloadidx1 { break } @@ -19519,7 +18994,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpAMD64MOVWloadidx2 { break } @@ -19545,7 +19020,6 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // match: (MOVWQZX (ANDLconst [c] x)) // result: (ANDLconst [c & 0xffff] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { break } @@ -19559,7 +19033,6 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // match: (MOVWQZX (MOVWQZX x)) // result: (MOVWQZX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVWQZX { break } @@ -19571,7 +19044,6 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { // match: (MOVWQZX (MOVBQZX x)) // result: (MOVBQZX x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVBQZX { break } @@ -19583,6 +19055,8 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) @@ -19591,9 +19065,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v 
*Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVWstore { break } @@ -19615,13 +19087,12 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -19638,14 +19109,13 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -19662,8 +19132,6 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -19671,6 +19139,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -19688,8 +19157,6 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ2 { break } @@ -19697,6 +19164,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -19714,15 +19182,16 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -19742,14 +19211,13 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -19766,13 +19234,12 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -19789,8 +19256,6 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -19801,19 +19266,21 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) // result: (MOVWloadidx2 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c v.Aux = sym @@ 
-19830,15 +19297,14 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -19858,15 +19324,14 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + d)) { continue } @@ -19886,14 +19351,13 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + c)) { continue } @@ -19909,20 +19373,22 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is32Bit(c + d)) { break } @@ -19940,14 +19406,13 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is32Bit(c + 2*d)) { break } @@ -19965,13 +19430,12 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(i + 2*c)) { break } @@ -19985,18 +19449,20 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) // result: (MOVWstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVWQSX { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = off v.Aux = sym @@ -20010,13 +19476,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVWQZX { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = off v.Aux = sym @@ -20031,14 +19496,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + 
off2)) { break } @@ -20056,13 +19520,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -20079,13 +19542,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + mem := v_2 if !(validOff(off)) { break } @@ -20102,15 +19564,14 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -20128,8 +19589,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -20137,7 +19596,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -20156,8 +19616,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ2 { break } @@ -20165,7 +19623,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -20184,16 +19643,17 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -20214,14 +19674,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -20240,6 +19698,9 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) @@ -20248,14 +19709,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -20277,15 +19736,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := 
v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -20311,15 +19768,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64SHRQconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -20345,9 +19800,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - x1 := v.Args[1] + p := v_0 + x1 := v_1 if x1.Op != OpAMD64MOVWload { break } @@ -20355,7 +19809,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { s2 := x1.Aux mem := x1.Args[1] p2 := x1.Args[0] - mem2 := v.Args[2] + mem2 := v_2 if mem2.Op != OpAMD64MOVWstore || mem2.AuxInt != i-2 || mem2.Aux != s { break } @@ -20390,15 +19844,14 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -20416,14 +19869,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -20438,19 +19890,20 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) // cond: ValAndOff(sc).canAdd(off) // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -20467,14 +19920,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -20491,8 +19943,6 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ1 { break } @@ -20500,6 +19950,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -20517,8 +19968,6 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { x := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ2 { break } @@ -20526,6 +19975,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -20542,13 +19992,12 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != 
OpAMD64ADDQ { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = x v.Aux = sym @@ -20563,9 +20012,8 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVWstoreconst { break } @@ -20590,9 +20038,8 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { a := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpAMD64MOVWstoreconst { break } @@ -20617,14 +20064,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAL { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -20641,13 +20087,12 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDLconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(ValAndOff(sc).canAdd(off)) { break } @@ -20661,19 +20106,21 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { continue } idx := v_1.Args[0] + mem := v_2 v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = c v.Aux = sym @@ -20690,15 +20137,14 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -20718,15 +20164,14 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(c)) { continue } @@ -20746,11 +20191,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - i := v.Args[1^_i0] - x := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + i := v_1 + x := v_2 if x.Op != OpAMD64MOVWstoreconstidx1 { continue } @@ -20759,8 +20203,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { continue } mem := x.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || i != x.Args[1^_i1] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p 
!= x_0 || i != x_1 || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { continue } v.reset(OpAMD64MOVLstoreconstidx1) @@ -20777,6 +20223,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) // cond: ValAndOff(x).canAdd(c) @@ -20784,14 +20233,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } c := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(ValAndOff(x).canAdd(c)) { break } @@ -20809,14 +20257,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { for { x := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(ValAndOff(x).canAdd(2 * c)) { break } @@ -20834,10 +20281,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] + p := v_0 + i := v_1 + x := v_2 if x.Op != OpAMD64MOVWstoreconstidx2 { break } @@ -20863,20 +20309,23 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { continue } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c v.Aux = sym @@ -20894,16 +20343,15 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64ADDQconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -20924,16 +20372,15 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { continue } @@ -20954,22 +20401,22 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRLconst || v_2.AuxInt != 16 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { 
continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpAMD64MOVLstoreidx1) @@ -20990,22 +20437,22 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 16 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpAMD64MOVLstoreidx1) @@ -21026,23 +20473,23 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRLconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -21067,23 +20514,23 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -21108,15 +20555,14 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 if v_1.Op != OpAMD64MOVQconst { continue } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + c)) { continue } @@ -21133,6 +20579,10 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) @@ -21140,15 +20590,14 @@ func 
rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is32Bit(c + d)) { break } @@ -21167,15 +20616,14 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is32Bit(c + 2*d)) { break } @@ -21194,15 +20642,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRLconst || v_2.AuxInt != 16 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s { break } @@ -21228,15 +20674,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 16 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s { break } @@ -21262,16 +20706,14 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + p := v_0 + idx := v_1 if v_2.Op != OpAMD64SHRQconst { break } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpAMD64MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s { break } @@ -21301,14 +20743,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt - w := v.Args[2] + w := v_2 + mem := v_3 if !(is32Bit(i + 2*c)) { break } @@ -21323,13 +20764,13 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULL x (MOVLconst [c])) // result: (MULLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVLconst { continue } @@ -21344,12 +20785,12 @@ func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLconst [c] (MULLconst [d] x)) // result: (MULLconst [int64(int32(c * d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MULLconst { break } @@ -21366,7 +20807,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != -9 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) v0.AddArg(x) @@ -21380,7 +20821,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != -5 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) v0.AddArg(x) @@ -21394,7 +20835,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != -3 { break } - x := v.Args[0] + x 
:= v_0 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) v0.AddArg(x) @@ -21408,7 +20849,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGL) v.AddArg(x) return true @@ -21429,7 +20870,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != 1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21441,7 +20882,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != 3 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL2) v.AddArg(x) v.AddArg(x) @@ -21453,7 +20894,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != 5 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL4) v.AddArg(x) v.AddArg(x) @@ -21465,7 +20906,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { if v.AuxInt != 7 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL2) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) @@ -21477,6 +20918,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLconst [ 9] x) // result: (LEAL8 x x) @@ -21484,7 +20926,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 9 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v.AddArg(x) v.AddArg(x) @@ -21496,7 +20938,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 11 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL2) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) @@ -21511,7 +20953,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 13 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL4) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) @@ -21526,7 +20968,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 19 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL2) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) @@ -21541,7 +20983,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 21 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL4) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) @@ -21556,7 +20998,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 25 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) @@ -21571,7 +21013,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 27 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) v0.AddArg(x) @@ -21589,7 +21031,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 37 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL4) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) @@ -21604,7 +21046,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 41 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) @@ -21619,7 +21061,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { if v.AuxInt != 45 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) v0.AddArg(x) @@ -21634,6 +21076,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool { return 
false } func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLconst [73] x) // result: (LEAL8 x (LEAL8 x x)) @@ -21641,7 +21084,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { if v.AuxInt != 73 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) @@ -21656,7 +21099,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { if v.AuxInt != 81 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAL8) v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) v0.AddArg(x) @@ -21673,7 +21116,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (SUBL (SHLLconst [log2(c+1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c+1) && c >= 15) { break } @@ -21690,7 +21133,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (LEAL1 (SHLLconst [log2(c-1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-1) && c >= 17) { break } @@ -21707,7 +21150,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (LEAL2 (SHLLconst [log2(c-2)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-2) && c >= 34) { break } @@ -21724,7 +21167,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (LEAL4 (SHLLconst [log2(c-4)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-4) && c >= 68) { break } @@ -21741,7 +21184,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (LEAL8 (SHLLconst [log2(c-8)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-8) && c >= 136) { break } @@ -21758,7 +21201,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (SHLLconst [log2(c/3)] (LEAL2 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } @@ -21775,7 +21218,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (SHLLconst [log2(c/5)] (LEAL4 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } @@ -21792,7 +21235,7 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { // result: (SHLLconst [log2(c/9)] (LEAL8 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } @@ -21807,11 +21250,11 @@ func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULLconst_30(v *Value) bool { + v_0 := v.Args[0] // match: (MULLconst [c] (MOVLconst [d])) // result: (MOVLconst [int64(int32(c*d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLconst { break } @@ -21823,14 +21266,14 @@ func rewriteValueAMD64_OpAMD64MULLconst_30(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULQ x (MOVQconst [c])) // cond: is32Bit(c) // result: (MULQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } @@ -21848,13 +21291,13 @@ func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULQconst [c] (MULQconst [d] x)) // cond: is32Bit(c*d) // result: (MULQconst [c * d] x) for { c := v.AuxInt - v_0 := 
v.Args[0] if v_0.Op != OpAMD64MULQconst { break } @@ -21874,7 +21317,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != -9 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) v0.AddArg(x) @@ -21888,7 +21331,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != -5 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) v0.AddArg(x) @@ -21902,7 +21345,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != -3 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGQ) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) v0.AddArg(x) @@ -21916,7 +21359,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64NEGQ) v.AddArg(x) return true @@ -21937,7 +21380,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != 1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21949,7 +21392,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != 3 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ2) v.AddArg(x) v.AddArg(x) @@ -21961,7 +21404,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != 5 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ4) v.AddArg(x) v.AddArg(x) @@ -21973,7 +21416,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { if v.AuxInt != 7 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ2) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) @@ -21985,6 +21428,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULQconst [ 9] x) // result: (LEAQ8 x x) @@ -21992,7 +21436,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 9 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v.AddArg(x) v.AddArg(x) @@ -22004,7 +21448,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 11 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ2) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) @@ -22019,7 +21463,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 13 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ4) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) @@ -22034,7 +21478,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 19 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ2) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) @@ -22049,7 +21493,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 21 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ4) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) @@ -22064,7 +21508,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 25 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) @@ -22079,7 +21523,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 27 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) v0.AddArg(x) @@ -22097,7 +21541,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 37 { break } - x := v.Args[0] + x := 
v_0 v.reset(OpAMD64LEAQ4) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) @@ -22112,7 +21556,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 41 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) @@ -22127,7 +21571,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { if v.AuxInt != 45 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) v0.AddArg(x) @@ -22142,6 +21586,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULQconst [73] x) // result: (LEAQ8 x (LEAQ8 x x)) @@ -22149,7 +21594,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { if v.AuxInt != 73 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) @@ -22164,7 +21609,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { if v.AuxInt != 81 { break } - x := v.Args[0] + x := v_0 v.reset(OpAMD64LEAQ8) v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) v0.AddArg(x) @@ -22181,7 +21626,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (SUBQ (SHLQconst [log2(c+1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c+1) && c >= 15) { break } @@ -22198,7 +21643,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (LEAQ1 (SHLQconst [log2(c-1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-1) && c >= 17) { break } @@ -22215,7 +21660,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (LEAQ2 (SHLQconst [log2(c-2)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-2) && c >= 34) { break } @@ -22232,7 +21677,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (LEAQ4 (SHLQconst [log2(c-4)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-4) && c >= 68) { break } @@ -22249,7 +21694,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (LEAQ8 (SHLQconst [log2(c-8)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-8) && c >= 136) { break } @@ -22266,7 +21711,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (SHLQconst [log2(c/3)] (LEAQ2 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } @@ -22283,7 +21728,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (SHLQconst [log2(c/5)] (LEAQ4 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } @@ -22300,7 +21745,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { // result: (SHLQconst [log2(c/9)] (LEAQ8 x x)) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } @@ -22315,11 +21760,11 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { + v_0 := v.Args[0] // match: (MULQconst [c] (MOVQconst [d])) // result: (MOVQconst [c*d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -22333,7 +21778,6 @@ func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { // result: (MULQconst [-c] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64NEGQ { break } @@ -22349,14 
+21793,15 @@ func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVSDload { continue } @@ -22380,6 +21825,9 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -22388,14 +21836,13 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -22413,15 +21860,14 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -22438,10 +21884,8 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -22460,14 +21904,15 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVSSload { continue } @@ -22491,6 +21936,9 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -22499,14 +21947,13 @@ func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -22524,15 +21971,14 @@ func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -22549,10 +21995,8 @@ func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr 
:= v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -22571,10 +22015,10 @@ func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGL (NEGL x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpAMD64NEGL { break } @@ -22588,7 +22032,7 @@ func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { // cond: s.Uses == 1 // result: (SUBL y x) for { - s := v.Args[0] + s := v_0 if s.Op != OpAMD64SUBL { break } @@ -22605,7 +22049,6 @@ func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { // match: (NEGL (MOVLconst [c])) // result: (MOVLconst [int64(int32(-c))]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLconst { break } @@ -22617,10 +22060,10 @@ func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGQ (NEGQ x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpAMD64NEGQ { break } @@ -22634,7 +22077,7 @@ func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { // cond: s.Uses == 1 // result: (SUBQ y x) for { - s := v.Args[0] + s := v_0 if s.Op != OpAMD64SUBQ { break } @@ -22651,7 +22094,6 @@ func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { // match: (NEGQ (MOVQconst [c])) // result: (MOVQconst [-c]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -22664,7 +22106,6 @@ func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { // cond: c != -(1<<31) // result: (ADDQconst [-c] x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } @@ -22685,10 +22126,10 @@ func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { + v_0 := v.Args[0] // match: (NOTL (MOVLconst [c])) // result: (MOVLconst [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLconst { break } @@ -22700,10 +22141,10 @@ func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { + v_0 := v.Args[0] // match: (NOTQ (MOVQconst [c])) // result: (MOVQconst [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -22715,12 +22156,12 @@ func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ORL (SHLL (MOVLconst [1]) y) x) // result: (BTSL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } @@ -22729,7 +22170,7 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpAMD64BTSL) v.AddArg(x) v.AddArg(y) @@ -22741,14 +22182,12 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 // result: (BTSLconst [log2uint32(c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { continue } @@ -22762,10 +22201,8 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // match: (ORL x (MOVLconst [c])) // result: (ORLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; 
_i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVLconst { continue } @@ -22781,15 +22218,12 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // cond: d==32-c // result: (ROLLconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRLconst { continue } @@ -22809,15 +22243,12 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // result: (ROLWconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRWconst { continue } @@ -22837,15 +22268,12 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // result: (ROLBconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRBconst { continue } @@ -22863,21 +22291,19 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) // result: (ROLL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRL { continue } @@ -22886,11 +22312,7 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -22920,21 +22342,19 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) // result: (ROLL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRL { continue } @@ -22943,11 +22363,7 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { 
continue } v_1_1_0 := v_1_1.Args[0] @@ -22977,21 +22393,19 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) // result: (RORL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRL { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLL { continue } @@ -23000,11 +22414,7 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -23034,21 +22444,19 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) // result: (RORL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRL { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLL { continue } @@ -23057,11 +22465,7 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -23091,15 +22495,15 @@ func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) // cond: v.Type.Size() == 2 // result: (ROLW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } @@ -23110,13 +22514,13 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRW { continue } @@ -23133,11 +22537,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] 
- if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -23168,9 +22568,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 2 // result: (ROLW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } @@ -23181,13 +22579,13 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRW { continue } @@ -23204,11 +22602,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -23239,9 +22633,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 2 // result: (RORW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRW { continue } @@ -23252,7 +22644,6 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHLL { continue } @@ -23283,9 +22674,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 2 // result: (RORW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRW { continue } @@ -23296,7 +22685,6 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHLL { continue } @@ -23327,9 +22715,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 1 // result: (ROLB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } @@ -23340,13 +22726,13 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRB { continue } @@ -23363,11 +22749,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -23398,9 +22780,7 @@ func 
rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 1 // result: (ROLB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } @@ -23411,13 +22791,13 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDL { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRB { continue } @@ -23434,11 +22814,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBLcarrymask { + if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -23469,9 +22845,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 1 // result: (RORB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRB { continue } @@ -23482,7 +22856,6 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHLL { continue } @@ -23513,9 +22886,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: v.Type.Size() == 1 // result: (RORB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRB { continue } @@ -23526,7 +22897,6 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHLL { continue } @@ -23556,8 +22926,8 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // match: (ORL x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -23569,9 +22939,8 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVBload { continue } @@ -23579,7 +22948,7 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { continue } @@ -23610,15 +22979,16 @@ func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; 
_i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVWload { continue } @@ -23626,7 +22996,7 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { continue } @@ -23658,9 +23028,8 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpAMD64SHLLconst { continue } @@ -23673,13 +23042,15 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpAMD64ORL { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpAMD64SHLLconst { continue } @@ -23696,7 +23067,7 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { if p != x0.Args[0] || mem != x0.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -23723,19 +23094,20 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVBloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { continue } @@ -23748,8 +23120,10 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -23771,19 +23145,20 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: 
@mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVWloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { continue } @@ -23796,8 +23171,10 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -23819,9 +23196,8 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpAMD64SHLLconst { continue } @@ -23833,16 +23209,20 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - or := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + or := v_1 if or.Op != OpAMD64ORL { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s0 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpAMD64SHLLconst { continue } @@ -23856,11 +23236,13 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { continue } _ = x0.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -23890,9 +23272,8 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := 
v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpAMD64MOVBload { continue } @@ -23900,7 +23281,7 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { continue } @@ -23935,9 +23316,8 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r1 := v_0 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { continue } @@ -23949,7 +23329,7 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { continue } @@ -23987,9 +23367,8 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpAMD64SHLLconst { continue } @@ -24002,13 +23381,15 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpAMD64ORL { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpAMD64SHLLconst { continue } @@ -24025,7 +23406,7 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -24055,19 +23436,20 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpAMD64MOVBloadidx1 { continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { continue } @@ -24080,8 +23462,10 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - 
if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -24106,9 +23490,8 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r1 := v_0 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { continue } @@ -24119,10 +23502,12 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { continue } @@ -24139,8 +23524,10 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -24163,15 +23550,16 @@ func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpAMD64SHLLconst { continue } @@ -24183,16 +23571,20 @@ func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - or := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 
<= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + or := v_1 if or.Op != OpAMD64ORL { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s1 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpAMD64SHLLconst { continue } @@ -24206,11 +23598,13 @@ func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -24243,10 +23637,9 @@ func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVLload { continue } @@ -24270,12 +23663,13 @@ func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORLconst [c] x) // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 // result: (BTSLconst [log2uint32(c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { break } @@ -24288,7 +23682,6 @@ func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { // result: (ORLconst [c | d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ORLconst { break } @@ -24303,7 +23696,6 @@ func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { // result: (ORLconst [c | 1<= 128 // result: (BTSQconst [log2(c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { continue } @@ -24590,10 +23979,8 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // cond: is32Bit(c) // result: (ORQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } @@ -24612,15 +23999,12 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // cond: d==64-c // result: (ROLQconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLQconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRQconst { continue } @@ -24638,21 +24022,19 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) // result: (ROLQ x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != 
OpAMD64SHLQ { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDQ { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRQ { continue } @@ -24661,11 +24043,7 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBQcarrymask { + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -24695,21 +24073,19 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) // result: (ROLQ x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLQ { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDQ { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHRQ { continue } @@ -24718,11 +24094,7 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBQcarrymask { + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -24752,21 +24124,19 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) // result: (RORQ x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRQ { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDQ { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLQ { continue } @@ -24775,11 +24145,7 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBQcarrymask { + if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -24809,21 +24175,19 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) // result: (RORQ x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHRQ { continue } y := v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64ANDQ { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + 
v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLQ { continue } @@ -24832,11 +24196,7 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { continue } v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1^_i1] - if v_1_1.Op != OpAMD64SBBQcarrymask { + if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { continue } v_1_1_0 := v_1_1.Args[0] @@ -24866,8 +24226,8 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // match: (ORQ x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -24879,9 +24239,8 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVBload { continue } @@ -24889,7 +24248,7 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { continue } @@ -24920,15 +24279,16 @@ func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVWload { continue } @@ -24936,7 +24296,7 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { continue } @@ -24968,9 +24328,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVLload { continue } @@ -24978,7 +24337,7 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { continue } @@ -25010,9 +24369,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 
+ s1 := v_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -25025,13 +24383,15 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25048,7 +24408,7 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { if p != x0.Args[0] || mem != x0.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -25075,9 +24435,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -25090,13 +24449,15 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25113,7 +24474,7 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { if p != x0.Args[0] || mem != x0.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -25140,19 +24501,20 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVBloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { continue } @@ -25165,8 +24527,10 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + 
for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -25188,19 +24552,20 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVWloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { continue } @@ -25213,8 +24578,10 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -25236,19 +24603,20 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpAMD64MOVLloadidx1 { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { continue } @@ -25261,8 +24629,10 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -25284,9 +24654,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -25298,16 +24667,20 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - or := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s0 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25321,11 +24694,13 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { continue } _ = x0.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -25355,9 +24730,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -25369,16 +24743,20 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - or := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s0 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25392,11 +24770,13 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { continue } _ = x0.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && 
clobber(s1) && clobber(or)) { continue } @@ -25426,9 +24806,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpAMD64MOVBload { continue } @@ -25436,7 +24815,7 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { continue } @@ -25470,15 +24849,16 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r1 := v_0 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { continue } @@ -25490,7 +24870,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { continue } @@ -25528,9 +24908,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r1 := v_0 if r1.Op != OpAMD64BSWAPL { continue } @@ -25542,7 +24921,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { continue } @@ -25580,9 +24959,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25595,13 +24973,15 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s1 := or_0 if 
s1.Op != OpAMD64SHLQconst { continue } @@ -25618,7 +24998,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -25648,9 +25028,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25667,13 +25046,15 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -25694,7 +25075,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -25723,19 +25104,20 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpAMD64MOVBloadidx1 { continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { continue } @@ -25748,8 +25130,10 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) 
@@ -25774,9 +25158,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r1 := v_0 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { continue } @@ -25787,10 +25170,12 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { continue } @@ -25807,8 +25192,10 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -25832,9 +25219,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r1 := v_0 if r1.Op != OpAMD64BSWAPL { continue } @@ -25845,10 +25231,12 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { continue } @@ -25865,8 +25253,10 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && 
clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -25890,9 +25280,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25904,16 +25293,20 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - or := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s1 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -25927,11 +25320,13 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -25964,9 +25359,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpAMD64SHLQconst { continue } @@ -25982,16 +25376,20 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - or := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + or := v_1 if or.Op != OpAMD64ORQ { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s1 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpAMD64SHLQconst { continue } @@ -26009,11 +25407,13 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; 
_i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -26045,10 +25445,9 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORQload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVQload { continue } @@ -26072,12 +25471,13 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORQconst [c] x) // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 // result: (BTSQconst [log2(c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { break } @@ -26090,7 +25490,6 @@ func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { // result: (ORQconst [c | d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64ORQconst { break } @@ -26105,7 +25504,6 @@ func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { // result: (ORQconst [c | 1<>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -27023,13 +26370,13 @@ func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SARL x (MOVQconst [c])) // result: (SARLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -27042,9 +26389,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // match: (SARL x (MOVLconst [c])) // result: (SARLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -27058,9 +26403,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SARL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDQconst { break } @@ -27078,9 +26421,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SARL x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -27105,9 +26446,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SARL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDQconst { break } @@ -27125,9 +26464,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SARL x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -27152,9 +26489,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SARL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDLconst { break } @@ -27172,9 +26507,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SARL x (NEGL y)) for { - _ = 
v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -27199,9 +26532,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SARL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDLconst { break } @@ -27219,9 +26550,7 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SARL x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -27245,13 +26574,14 @@ func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SARLconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -27261,7 +26591,6 @@ func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool { // result: (MOVQconst [int64(int32(d))>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -27273,13 +26602,13 @@ func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SARQ x (MOVQconst [c])) // result: (SARQconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -27292,9 +26621,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // match: (SARQ x (MOVLconst [c])) // result: (SARQconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -27308,9 +26635,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SARQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDQconst { break } @@ -27328,9 +26653,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SARQ x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -27355,9 +26678,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SARQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDQconst { break } @@ -27375,9 +26696,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SARQ x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -27402,9 +26721,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SARQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDLconst { break } @@ -27422,9 +26739,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SARQ x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -27449,9 +26764,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SARQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDLconst { break } @@ -27469,9 +26782,7 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SARQ x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != 
OpAMD64NEGL { break } @@ -27495,13 +26806,14 @@ func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SARQconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -27511,7 +26823,6 @@ func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { // result: (MOVQconst [d>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -27523,12 +26834,12 @@ func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SARW x (MOVQconst [c])) // result: (SARWconst [min(c&31,15)] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -27541,9 +26852,7 @@ func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { // match: (SARW x (MOVLconst [c])) // result: (SARWconst [min(c&31,15)] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -27556,13 +26865,14 @@ func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SARWconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -27572,7 +26882,6 @@ func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { // result: (MOVQconst [int64(int16(d))>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -27584,10 +26893,10 @@ func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { + v_0 := v.Args[0] // match: (SBBLcarrymask (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -27598,7 +26907,6 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagLT_ULT)) // result: (MOVLconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -27609,7 +26917,6 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -27620,7 +26927,6 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagGT_ULT)) // result: (MOVLconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -27631,7 +26937,6 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { // match: (SBBLcarrymask (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -27642,17 +26947,19 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SBBQ x (MOVQconst [c]) borrow) // cond: is32Bit(c) // result: (SBBQconst x [c] borrow) for { - borrow := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := v_1.AuxInt + borrow := v_2 if !(is32Bit(c)) { break } @@ -27665,10 +26972,8 @@ func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool { // match: (SBBQ x y (FlagEQ)) // result: (SUBQborrow x y) for { - _ = v.Args[2] - x := 
v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64FlagEQ { break } @@ -27680,10 +26985,10 @@ func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { + v_0 := v.Args[0] // match: (SBBQcarrymask (FlagEQ)) // result: (MOVQconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -27694,7 +26999,6 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { // match: (SBBQcarrymask (FlagLT_ULT)) // result: (MOVQconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -27705,7 +27009,6 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { // match: (SBBQcarrymask (FlagLT_UGT)) // result: (MOVQconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -27716,7 +27019,6 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { // match: (SBBQcarrymask (FlagGT_ULT)) // result: (MOVQconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -27727,7 +27029,6 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { // match: (SBBQcarrymask (FlagGT_UGT)) // result: (MOVQconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -27738,13 +27039,13 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SBBQconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SBBQconst x [c] (FlagEQ)) // result: (SUBQconstborrow x [c]) for { c := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64FlagEQ { break } @@ -27756,10 +27057,10 @@ func rewriteValueAMD64_OpAMD64SBBQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETA (InvertFlags x)) // result: (SETB x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -27771,7 +27072,6 @@ func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { // match: (SETA (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -27782,7 +27082,6 @@ func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { // match: (SETA (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -27793,7 +27092,6 @@ func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { // match: (SETA (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -27804,7 +27102,6 @@ func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { // match: (SETA (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -27815,7 +27112,6 @@ func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { // match: (SETA (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -27826,10 +27122,10 @@ func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETAE (InvertFlags x)) // result: (SETBE x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -27841,7 +27137,6 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { // match: (SETAE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -27852,7 +27147,6 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { // match: (SETAE 
(FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -27863,7 +27157,6 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { // match: (SETAE (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -27874,7 +27167,6 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { // match: (SETAE (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -27885,7 +27177,6 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { // match: (SETAE (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -27896,6 +27187,9 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem) @@ -27903,13 +27197,12 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETBEstore) v.AuxInt = off v.Aux = sym @@ -27924,14 +27217,13 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -27949,15 +27241,14 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -27974,12 +27265,11 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -27995,12 +27285,11 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28016,12 +27305,11 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28037,12 +27325,11 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28058,12 +27345,11 @@ func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28077,6 +27363,9 @@ func 
rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem) @@ -28084,13 +27373,12 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym @@ -28105,14 +27393,13 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -28130,15 +27417,14 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -28155,12 +27441,11 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28176,12 +27461,11 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28197,12 +27481,11 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28218,12 +27501,11 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28239,12 +27521,11 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28258,10 +27539,10 @@ func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETB (InvertFlags x)) // result: (SETA x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -28273,7 +27554,6 @@ func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -28284,7 +27564,6 @@ func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -28295,7 +27574,6 @@ func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (FlagLT_UGT)) 
// result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -28306,7 +27584,6 @@ func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -28317,7 +27594,6 @@ func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -28328,10 +27604,10 @@ func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETBE (InvertFlags x)) // result: (SETAE x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -28343,7 +27619,6 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { // match: (SETBE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -28354,7 +27629,6 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { // match: (SETBE (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -28365,7 +27639,6 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { // match: (SETBE (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -28376,7 +27649,6 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { // match: (SETBE (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -28387,7 +27659,6 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { // match: (SETBE (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -28398,6 +27669,9 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem) @@ -28405,13 +27679,12 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym @@ -28426,14 +27699,13 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -28451,15 +27723,14 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -28476,12 +27747,11 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28497,12 +27767,11 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - 
v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28518,12 +27787,11 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28539,12 +27807,11 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28560,12 +27827,11 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28579,6 +27845,9 @@ func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem) @@ -28586,13 +27855,12 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETAstore) v.AuxInt = off v.Aux = sym @@ -28607,14 +27875,13 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -28632,15 +27899,14 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -28657,12 +27923,11 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28678,12 +27943,11 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28699,12 +27963,11 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28720,12 +27983,11 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28741,12 +28003,11 @@ func 
rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -28760,17 +28021,18 @@ func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) // result: (SETAE (BTL x y)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLL { continue } @@ -28779,7 +28041,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg(x) @@ -28792,13 +28054,13 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) // result: (SETAE (BTQ x y)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLQ { continue } @@ -28807,7 +28069,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg(x) @@ -28821,7 +28083,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // cond: isUint32PowerOfTwo(c) // result: (SETAE (BTLconst [log2uint32(c)] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTLconst { break } @@ -28841,7 +28102,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // cond: isUint64PowerOfTwo(c) // result: (SETAE (BTQconst [log2(c)] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQconst { break } @@ -28861,18 +28121,18 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // cond: isUint64PowerOfTwo(c) // result: (SETAE (BTQconst [log2(c)] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64MOVQconst { continue } c := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } @@ -28888,7 +28148,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _))) // result: (SETNE (CMPLconst [0] s)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64CMPLconst || v_0.AuxInt != 1 { break } @@ -28906,7 +28165,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _))) // result: (SETNE (CMPQconst [0] s)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64CMPQconst || v_0.AuxInt != 1 { break } @@ -28925,13 +28183,14 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // cond: z1==z2 // result: (SETAE (BTQconst [63] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 
1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { continue } @@ -28940,7 +28199,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -28957,13 +28216,14 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // cond: z1==z2 // result: (SETAE (BTQconst [31] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { continue } @@ -28972,7 +28232,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -28989,13 +28249,14 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { // cond: z1==z2 // result: (SETAE (BTQconst [0] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } @@ -29004,7 +28265,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -29020,18 +28281,20 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) // cond: z1==z2 // result: (SETAE (BTLconst [0] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } @@ -29040,7 +28303,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -29057,18 +28320,19 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // cond: z1==z2 // result: (SETAE (BTQconst [63] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -29085,18 +28349,19 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // cond: z1==z2 // result: (SETAE (BTLconst [31] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -29112,7 +28377,6 
@@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // match: (SETEQ (InvertFlags x)) // result: (SETEQ x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -29124,7 +28388,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // match: (SETEQ (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -29135,7 +28398,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // match: (SETEQ (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -29146,7 +28408,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // match: (SETEQ (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -29157,7 +28418,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // match: (SETEQ (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -29168,7 +28428,6 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { // match: (SETEQ (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -29179,21 +28438,23 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) // result: (SETAEstore [off] {sym} ptr (BTL x y) mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLL { continue } @@ -29202,7 +28463,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { continue } - y := v_1.Args[1^_i0] + y := v_1_1 + mem := v_2 v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym @@ -29221,15 +28483,14 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLQ { continue } @@ -29238,7 +28499,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { continue } - y := v_1.Args[1^_i0] + y := v_1_1 + mem := v_2 v.reset(OpAMD64SETAEstore) v.AuxInt = off v.Aux = sym @@ -29258,14 +28520,13 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTLconst { break } c := v_1.AuxInt x := v_1.Args[0] + mem := v_2 if !(isUint32PowerOfTwo(c)) { break } @@ -29286,14 +28547,13 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQconst { break } c := v_1.AuxInt x := v_1.Args[0] + mem := v_2 if !(isUint64PowerOfTwo(c)) { break } @@ -29314,20 +28574,20 @@ func 
rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64MOVQconst { continue } c := v_1_0.AuxInt - x := v_1.Args[1^_i0] + x := v_1_1 + mem := v_2 if !(isUint64PowerOfTwo(c)) { continue } @@ -29349,9 +28609,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64CMPLconst || v_1.AuxInt != 1 { break } @@ -29359,6 +28617,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { break } + mem := v_2 v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym @@ -29375,9 +28634,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64CMPQconst || v_1.AuxInt != 1 { break } @@ -29385,6 +28642,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { break } + mem := v_2 v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym @@ -29402,15 +28660,15 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { continue } @@ -29419,7 +28677,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -29442,15 +28701,15 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { continue } @@ -29459,7 +28718,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -29482,15 +28742,15 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } @@ -29499,7 +28759,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -29519,6 +28780,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore_0(v 
*Value) bool { return false } func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) @@ -29527,15 +28791,15 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } @@ -29544,7 +28808,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -29567,20 +28832,21 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } x := z1.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -29603,20 +28869,21 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } x := z1.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -29638,13 +28905,12 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym @@ -29659,14 +28925,13 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -29684,15 +28949,14 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -29709,12 +28973,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -29730,12 +28993,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem 
:= v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -29751,12 +29013,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -29772,12 +29033,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -29791,6 +29051,9 @@ func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -29798,12 +29061,11 @@ func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -29817,10 +29079,10 @@ func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETG (InvertFlags x)) // result: (SETL x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -29832,7 +29094,6 @@ func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -29843,7 +29104,6 @@ func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -29854,7 +29114,6 @@ func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -29865,7 +29124,6 @@ func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -29876,7 +29134,6 @@ func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -29887,10 +29144,10 @@ func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETGE (InvertFlags x)) // result: (SETLE x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -29902,7 +29159,6 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { // match: (SETGE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -29913,7 +29169,6 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { // match: (SETGE (FlagLT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -29924,7 +29179,6 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { // match: (SETGE (FlagLT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != 
OpAMD64FlagLT_UGT { break } @@ -29935,7 +29189,6 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { // match: (SETGE (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -29946,7 +29199,6 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { // match: (SETGE (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -29957,6 +29209,9 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem) @@ -29964,13 +29219,12 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETLEstore) v.AuxInt = off v.Aux = sym @@ -29985,14 +29239,13 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -30010,15 +29263,14 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -30035,12 +29287,11 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30056,12 +29307,11 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30077,12 +29327,11 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30098,12 +29347,11 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30119,12 +29367,11 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30138,6 +29385,9 @@ func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) @@ -30145,13 
+29395,12 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETLstore) v.AuxInt = off v.Aux = sym @@ -30166,14 +29415,13 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -30191,15 +29439,14 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -30216,12 +29463,11 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30237,12 +29483,11 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30258,12 +29503,11 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30279,12 +29523,11 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30300,12 +29543,11 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30319,10 +29561,10 @@ func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETL (InvertFlags x)) // result: (SETG x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -30334,7 +29576,6 @@ func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (FlagEQ)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -30345,7 +29586,6 @@ func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -30356,7 +29596,6 @@ func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -30367,7 +29606,6 @@ func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ 
-30378,7 +29616,6 @@ func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -30389,10 +29626,10 @@ func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { + v_0 := v.Args[0] // match: (SETLE (InvertFlags x)) // result: (SETGE x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -30404,7 +29641,6 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { // match: (SETLE (FlagEQ)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -30415,7 +29651,6 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { // match: (SETLE (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -30426,7 +29661,6 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { // match: (SETLE (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -30437,7 +29671,6 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { // match: (SETLE (FlagGT_ULT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -30448,7 +29681,6 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { // match: (SETLE (FlagGT_UGT)) // result: (MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -30459,6 +29691,9 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) @@ -30466,13 +29701,12 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETGEstore) v.AuxInt = off v.Aux = sym @@ -30487,14 +29721,13 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -30512,15 +29745,14 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -30537,12 +29769,11 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30558,12 +29789,11 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30579,12 +29809,11 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] 
- v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30600,12 +29829,11 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30621,12 +29849,11 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30640,6 +29867,9 @@ func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem) @@ -30647,13 +29877,12 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETGstore) v.AuxInt = off v.Aux = sym @@ -30668,14 +29897,13 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -30693,15 +29921,14 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -30718,12 +29945,11 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30739,12 +29965,11 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30760,12 +29985,11 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30781,12 +30005,11 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30802,12 +30025,11 @@ func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -30821,17 +30043,18 @@ func 
rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) // result: (SETB (BTL x y)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLL { continue } @@ -30840,7 +30063,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg(x) @@ -30853,13 +30076,13 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) // result: (SETB (BTQ x y)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLQ { continue } @@ -30868,7 +30091,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg(x) @@ -30882,7 +30105,6 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // cond: isUint32PowerOfTwo(c) // result: (SETB (BTLconst [log2uint32(c)] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTLconst { break } @@ -30902,7 +30124,6 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // cond: isUint64PowerOfTwo(c) // result: (SETB (BTQconst [log2(c)] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQconst { break } @@ -30922,18 +30143,18 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // cond: isUint64PowerOfTwo(c) // result: (SETB (BTQconst [log2(c)] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64MOVQconst { continue } c := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } @@ -30949,7 +30170,6 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) // result: (SETEQ (CMPLconst [0] s)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64CMPLconst || v_0.AuxInt != 1 { break } @@ -30967,7 +30187,6 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) // result: (SETEQ (CMPQconst [0] s)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64CMPQconst || v_0.AuxInt != 1 { break } @@ -30986,13 +30205,14 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // cond: z1==z2 // result: (SETB (BTQconst [63] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { continue } @@ -31001,7 +30221,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 
continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -31018,13 +30238,14 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // cond: z1==z2 // result: (SETB (BTQconst [31] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { continue } @@ -31033,7 +30254,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -31050,13 +30271,14 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { // cond: z1==z2 // result: (SETB (BTQconst [0] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } @@ -31065,7 +30287,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -31081,18 +30303,20 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) // cond: z1==z2 // result: (SETB (BTLconst [0] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } @@ -31101,7 +30325,7 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -31118,18 +30342,19 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // cond: z1==z2 // result: (SETB (BTQconst [63] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTQ { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -31146,18 +30371,19 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // cond: z1==z2 // result: (SETB (BTLconst [31] x)) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64TESTL { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -31173,7 +30399,6 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // match: (SETNE (InvertFlags x)) // result: (SETNE x) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64InvertFlags { break } @@ -31185,7 +30410,6 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // match: (SETNE (FlagEQ)) // result: 
(MOVLconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } @@ -31196,7 +30420,6 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // match: (SETNE (FlagLT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } @@ -31207,7 +30430,6 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // match: (SETNE (FlagLT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } @@ -31218,7 +30440,6 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // match: (SETNE (FlagGT_ULT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } @@ -31229,7 +30450,6 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { // match: (SETNE (FlagGT_UGT)) // result: (MOVLconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } @@ -31240,21 +30460,23 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) // result: (SETBstore [off] {sym} ptr (BTL x y) mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLL { continue } @@ -31263,7 +30485,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { continue } - y := v_1.Args[1^_i0] + y := v_1_1 + mem := v_2 v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym @@ -31282,15 +30505,14 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64SHLQ { continue } @@ -31299,7 +30521,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { continue } - y := v_1.Args[1^_i0] + y := v_1_1 + mem := v_2 v.reset(OpAMD64SETBstore) v.AuxInt = off v.Aux = sym @@ -31319,14 +30542,13 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTLconst { break } c := v_1.AuxInt x := v_1.Args[0] + mem := v_2 if !(isUint32PowerOfTwo(c)) { break } @@ -31347,14 +30569,13 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQconst { break } c := v_1.AuxInt x := v_1.Args[0] + mem := v_2 if !(isUint64PowerOfTwo(c)) { break } @@ -31375,20 +30596,20 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 
:= v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpAMD64MOVQconst { continue } c := v_1_0.AuxInt - x := v_1.Args[1^_i0] + x := v_1_1 + mem := v_2 if !(isUint64PowerOfTwo(c)) { continue } @@ -31410,9 +30631,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64CMPLconst || v_1.AuxInt != 1 { break } @@ -31420,6 +30639,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { break } + mem := v_2 v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym @@ -31436,9 +30656,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64CMPQconst || v_1.AuxInt != 1 { break } @@ -31446,6 +30664,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { break } + mem := v_2 v.reset(OpAMD64SETEQstore) v.AuxInt = off v.Aux = sym @@ -31463,15 +30682,15 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { continue } @@ -31480,7 +30699,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -31503,15 +30723,15 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { continue } @@ -31520,7 +30740,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -31543,15 +30764,15 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } @@ -31560,7 +30781,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -31580,6 +30802,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) @@ -31588,15 
+30813,15 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } @@ -31605,7 +30830,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { continue } x := z1_0.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -31628,20 +30854,21 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTQ { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } x := z1.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -31664,20 +30891,21 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64TESTL { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } x := z1.Args[0] - z2 := v_1.Args[1^_i0] + z2 := v_1_1 + mem := v_2 if !(z1 == z2) { continue } @@ -31699,13 +30927,12 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64InvertFlags { break } x := v_1.Args[0] + mem := v_2 v.reset(OpAMD64SETNEstore) v.AuxInt = off v.Aux = sym @@ -31720,14 +30947,13 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -31745,15 +30971,14 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -31770,12 +30995,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -31791,12 +31015,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -31812,12 +31035,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - 
mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -31833,12 +31055,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -31852,6 +31073,9 @@ func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) @@ -31859,12 +31083,11 @@ func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = off v.Aux = sym @@ -31878,13 +31101,13 @@ func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SHLL x (MOVQconst [c])) // result: (SHLLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -31897,9 +31120,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // match: (SHLL x (MOVLconst [c])) // result: (SHLLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -31913,9 +31134,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHLL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDQconst { break } @@ -31933,9 +31152,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHLL x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -31960,9 +31177,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHLL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDQconst { break } @@ -31980,9 +31195,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHLL x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32007,9 +31220,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHLL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDLconst { break } @@ -32027,9 +31238,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHLL x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32054,9 +31263,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHLL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDLconst { break } @@ -32074,9 +31281,7 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHLL x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != 
OpAMD64NEGL { break } @@ -32100,14 +31305,11 @@ func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHLLconst [1] (SHRLconst [1] x)) // result: (BTRLconst [0] x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRLconst || v_0.AuxInt != 1 { + if v.AuxInt != 1 || v_0.Op != OpAMD64SHRLconst || v_0.AuxInt != 1 { break } x := v_0.Args[0] @@ -32122,7 +31324,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -32131,13 +31333,13 @@ func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SHLQ x (MOVQconst [c])) // result: (SHLQconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -32150,9 +31352,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // match: (SHLQ x (MOVLconst [c])) // result: (SHLQconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -32166,9 +31366,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHLQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDQconst { break } @@ -32186,9 +31384,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHLQ x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32213,9 +31409,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHLQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDQconst { break } @@ -32233,9 +31427,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHLQ x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32260,9 +31452,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHLQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDLconst { break } @@ -32280,9 +31470,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHLQ x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32307,9 +31495,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHLQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDLconst { break } @@ -32327,9 +31513,7 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHLQ x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32353,14 +31537,11 @@ func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHLQconst [1] (SHRQconst [1] x)) // result: (BTRQconst [0] x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQconst || v_0.AuxInt != 1 { + if v.AuxInt != 1 || v_0.Op != OpAMD64SHRQconst || 
v_0.AuxInt != 1 { break } x := v_0.Args[0] @@ -32375,7 +31556,7 @@ func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -32384,13 +31565,13 @@ func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SHRB x (MOVQconst [c])) // cond: c&31 < 8 // result: (SHRBconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -32407,9 +31588,7 @@ func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { // cond: c&31 < 8 // result: (SHRBconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -32426,8 +31605,6 @@ func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { // cond: c&31 >= 8 // result: (MOVLconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpAMD64MOVQconst { break } @@ -32443,8 +31620,6 @@ func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { // cond: c&31 >= 8 // result: (MOVLconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpAMD64MOVLconst { break } @@ -32459,13 +31634,14 @@ func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHRBconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -32474,13 +31650,13 @@ func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SHRL x (MOVQconst [c])) // result: (SHRLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -32493,9 +31669,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // match: (SHRL x (MOVLconst [c])) // result: (SHRLconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -32509,9 +31683,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHRL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDQconst { break } @@ -32529,9 +31701,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHRL x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32556,9 +31726,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHRL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDQconst { break } @@ -32576,9 +31744,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHRL x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32603,9 +31769,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHRL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDLconst { break } @@ -32623,9 +31787,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 0 // result: (SHRL x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] 
+ x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32650,9 +31812,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHRL x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDLconst { break } @@ -32670,9 +31830,7 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { // cond: c & 31 == 31 // result: (SHRL x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32696,14 +31854,11 @@ func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHRLconst [1] (SHLLconst [1] x)) // result: (BTRLconst [31] x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { + if v.AuxInt != 1 || v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { break } x := v_0.Args[0] @@ -32718,7 +31873,7 @@ func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -32727,13 +31882,13 @@ func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SHRQ x (MOVQconst [c])) // result: (SHRQconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -32746,9 +31901,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // match: (SHRQ x (MOVLconst [c])) // result: (SHRQconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -32762,9 +31915,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHRQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDQconst { break } @@ -32782,9 +31933,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHRQ x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32809,9 +31958,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHRQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDQconst { break } @@ -32829,9 +31976,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHRQ x (NEGQ y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGQ { break } @@ -32856,9 +32001,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHRQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ADDLconst { break } @@ -32876,9 +32019,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 0 // result: (SHRQ x (NEGL y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32903,9 +32044,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHRQ x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64ANDLconst { break } @@ -32923,9 +32062,7 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { // cond: c & 63 == 63 // result: (SHRQ x (NEGL y)) for { - _ = v.Args[1] - x 
:= v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64NEGL { break } @@ -32949,14 +32086,11 @@ func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHRQconst [1] (SHLQconst [1] x)) // result: (BTRQconst [63] x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { + if v.AuxInt != 1 || v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { break } x := v_0.Args[0] @@ -32971,7 +32105,7 @@ func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -32980,13 +32114,13 @@ func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SHRW x (MOVQconst [c])) // cond: c&31 < 16 // result: (SHRWconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -33003,9 +32137,7 @@ func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { // cond: c&31 < 16 // result: (SHRWconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -33022,8 +32154,6 @@ func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { // cond: c&31 >= 16 // result: (MOVLconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpAMD64MOVQconst { break } @@ -33039,8 +32169,6 @@ func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { // cond: c&31 >= 16 // result: (MOVLconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpAMD64MOVLconst { break } @@ -33055,13 +32183,14 @@ func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SHRWconst x [0]) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -33070,13 +32199,13 @@ func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBL x (MOVLconst [c])) // result: (SUBLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVLconst { break } @@ -33089,12 +32218,11 @@ func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { // match: (SUBL (MOVLconst [c]) x) // result: (NEGL (SUBLconst x [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVLconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpAMD64NEGL) v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) v0.AuxInt = c @@ -33105,8 +32233,8 @@ func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { // match: (SUBL x x) // result: (MOVLconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpAMD64MOVLconst) @@ -33117,9 +32245,8 @@ func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != OpAMD64MOVLload { break } @@ -33141,12 +32268,13 @@ func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBLconst [c] x) // cond: int32(c) == 0 // result: x for 
{ c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -33159,7 +32287,7 @@ func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { // result: (ADDLconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 v.reset(OpAMD64ADDLconst) v.AuxInt = int64(int32(-c)) v.AddArg(x) @@ -33167,6 +32295,9 @@ func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { } } func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -33175,14 +32306,13 @@ func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -33200,15 +32330,14 @@ func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -33225,10 +32354,8 @@ func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -33247,20 +32374,22 @@ func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem) // cond: is32Bit(off1+off2) // result: (SUBLmodify [off1+off2] {sym} base val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -33278,15 +32407,14 @@ func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -33301,14 +32429,14 @@ func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBQ x (MOVQconst [c])) // cond: is32Bit(c) // result: (SUBQconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -33325,12 +32453,11 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { // cond: is32Bit(c) // result: (NEGQ (SUBQconst x [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } c := v_0.AuxInt + x := v_1 if !(is32Bit(c)) { break } @@ -33344,8 +32471,8 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { // match: (SUBQ x x) // result: (MOVQconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpAMD64MOVQconst) @@ -33356,9 +32483,8 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { // cond: 
canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBQload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != OpAMD64MOVQload { break } @@ -33380,13 +32506,13 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBQborrow_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBQborrow x (MOVQconst [c])) // cond: is32Bit(c) // result: (SUBQconstborrow x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpAMD64MOVQconst { break } @@ -33402,13 +32528,14 @@ func rewriteValueAMD64_OpAMD64SUBQborrow_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBQconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -33419,7 +32546,7 @@ func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { // result: (ADDQconst [-c] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c != -(1 << 31)) { break } @@ -33432,7 +32559,6 @@ func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { // result: (MOVQconst [d-c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64MOVQconst { break } @@ -33446,7 +32572,6 @@ func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { // result: (ADDQconst [-c-d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64SUBQconst { break } @@ -33463,6 +32588,9 @@ func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -33471,14 +32599,13 @@ func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -33496,15 +32623,14 @@ func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -33521,10 +32647,8 @@ func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -33543,20 +32667,22 @@ func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem) // cond: is32Bit(off1+off2) // result: (SUBQmodify [off1+off2] {sym} base val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpAMD64ADDQconst { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -33574,15 +32700,14 @@ func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] 
if v_0.Op != OpAMD64LEAQ { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -33597,13 +32722,14 @@ func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBSDload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != OpAMD64MOVSDload { break } @@ -33625,6 +32751,9 @@ func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -33633,14 +32762,13 @@ func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -33658,15 +32786,14 @@ func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -33683,10 +32810,8 @@ func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -33705,13 +32830,14 @@ func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBSSload x [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] + x := v_0 + l := v_1 if l.Op != OpAMD64MOVSSload { break } @@ -33733,6 +32859,9 @@ func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem) @@ -33741,14 +32870,13 @@ func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -33766,15 +32894,14 @@ func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -33791,10 +32918,8 @@ func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - 
ptr := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr := v_1 if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -33813,18 +32938,18 @@ func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TESTB (MOVLconst [c]) x) // result: (TESTBconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 v.reset(OpAMD64TESTBconst) v.AuxInt = c v.AddArg(x) @@ -33836,9 +32961,8 @@ func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 if l.Op != OpAMD64MOVBload { continue } @@ -33846,7 +32970,7 @@ func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] - l2 := v.Args[1^_i0] + l2 := v_1 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { continue } @@ -33865,6 +32989,7 @@ func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TESTBconst [-1] x) // cond: x.Op != OpAMD64MOVLconst // result: (TESTB x x) @@ -33872,7 +32997,7 @@ func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 if !(x.Op != OpAMD64MOVLconst) { break } @@ -33884,18 +33009,18 @@ func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TESTL (MOVLconst [c]) x) // result: (TESTLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 v.reset(OpAMD64TESTLconst) v.AuxInt = c v.AddArg(x) @@ -33907,9 +33032,8 @@ func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 if l.Op != OpAMD64MOVLload { continue } @@ -33917,7 +33041,7 @@ func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] - l2 := v.Args[1^_i0] + l2 := v_1 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { continue } @@ -33936,6 +33060,7 @@ func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TESTLconst [-1] x) // cond: x.Op != OpAMD64MOVLconst // result: (TESTL x x) @@ -33943,7 +33068,7 @@ func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 if !(x.Op != OpAMD64MOVLconst) { break } @@ -33955,19 +33080,19 @@ func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool { return false } func 
rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TESTQ (MOVQconst [c]) x) // cond: is32Bit(c) // result: (TESTQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(is32Bit(c)) { continue } @@ -33982,9 +33107,8 @@ func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 if l.Op != OpAMD64MOVQload { continue } @@ -33992,7 +33116,7 @@ func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] - l2 := v.Args[1^_i0] + l2 := v_1 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { continue } @@ -34011,6 +33135,7 @@ func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TESTQconst [-1] x) // cond: x.Op != OpAMD64MOVQconst // result: (TESTQ x x) @@ -34018,7 +33143,7 @@ func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 if !(x.Op != OpAMD64MOVQconst) { break } @@ -34030,18 +33155,18 @@ func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TESTW (MOVLconst [c]) x) // result: (TESTWconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 v.reset(OpAMD64TESTWconst) v.AuxInt = c v.AddArg(x) @@ -34053,9 +33178,8 @@ func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - l := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 if l.Op != OpAMD64MOVWload { continue } @@ -34063,7 +33187,7 @@ func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { sym := l.Aux mem := l.Args[1] ptr := l.Args[0] - l2 := v.Args[1^_i0] + l2 := v_1 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { continue } @@ -34082,6 +33206,7 @@ func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TESTWconst [-1] x) // cond: x.Op != OpAMD64MOVLconst // result: (TESTW x x) @@ -34089,7 +33214,7 @@ func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 if !(x.Op != OpAMD64MOVLconst) { break } @@ -34101,20 +33226,22 @@ func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (XADDLlock [off1+off2] {sym} val ptr mem) for { 
off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -34129,20 +33256,22 @@ func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (XADDQlock [off1+off2] {sym} val ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -34157,20 +33286,22 @@ func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (XCHGL [off1+off2] {sym} val ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -34188,15 +33319,14 @@ func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { break } @@ -34211,20 +33341,22 @@ func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (XCHGQ [off1+off2] {sym} val ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64ADDQconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -34242,15 +33374,14 @@ func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] + val := v_0 if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { break } @@ -34265,12 +33396,12 @@ func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XORL (SHLL (MOVLconst [1]) y) x) // result: (BTCL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLL { continue } @@ -34279,7 +33410,7 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpAMD64BTCL) v.AddArg(x) v.AddArg(y) @@ -34291,14 +33422,12 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 // result: (BTCLconst [log2uint32(c)] x) 
for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { continue } @@ -34312,10 +33441,8 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // match: (XORL x (MOVLconst [c])) // result: (XORLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVLconst { continue } @@ -34331,15 +33458,12 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // cond: d==32-c // result: (ROLLconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRLconst { continue } @@ -34359,15 +33483,12 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // result: (ROLWconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRWconst { continue } @@ -34387,15 +33508,12 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // result: (ROLBconst x [c]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLLconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRBconst { continue } @@ -34413,8 +33531,8 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // match: (XORL x x) // result: (MOVLconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpAMD64MOVLconst) @@ -34425,10 +33543,9 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVLload { continue } @@ -34452,12 +33569,13 @@ func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORLconst [c] x) // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 // result: (BTCLconst [log2uint32(c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) { break } @@ -34469,11 +33587,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETNE x)) // result: (SETEQ x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETNE { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETNE { break } x := v_0.Args[0] @@ -34484,11 +33598,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETEQ x)) // result: (SETNE x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETEQ { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETEQ { break } x := v_0.Args[0] @@ -34499,11 +33609,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) 
bool { // match: (XORLconst [1] (SETL x)) // result: (SETGE x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETL { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETL { break } x := v_0.Args[0] @@ -34514,11 +33620,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETGE x)) // result: (SETL x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETGE { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETGE { break } x := v_0.Args[0] @@ -34529,11 +33631,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETLE x)) // result: (SETG x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETLE { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETLE { break } x := v_0.Args[0] @@ -34544,11 +33642,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETG x)) // result: (SETLE x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETG { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETG { break } x := v_0.Args[0] @@ -34559,11 +33653,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETB x)) // result: (SETAE x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETB { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETB { break } x := v_0.Args[0] @@ -34574,11 +33664,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETAE x)) // result: (SETB x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETAE { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETAE { break } x := v_0.Args[0] @@ -34589,11 +33675,7 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { // match: (XORLconst [1] (SETBE x)) // result: (SETA x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETBE { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETBE { break } x := v_0.Args[0] @@ -34604,14 +33686,11 @@ func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (XORLconst [1] (SETA x)) // result: (SETBE x) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETA { + if v.AuxInt != 1 || v_0.Op != OpAMD64SETA { break } x := v_0.Args[0] @@ -34623,7 +33702,6 @@ func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { // result: (XORLconst [c ^ d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64XORLconst { break } @@ -34638,7 +33716,6 @@ func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { // result: (XORLconst [c ^ 1<= 128 // result: (BTCQconst [log2(c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { continue } @@ -34911,10 +33985,8 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // cond: is32Bit(c) // result: (XORQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAMD64MOVQconst { continue } @@ -34933,15 +34005,12 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // cond: d==64-c // result: (ROLQconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; 
_i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLQconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpAMD64SHRQconst { continue } @@ -34959,8 +34028,8 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // match: (XORQ x x) // result: (MOVQconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpAMD64MOVQconst) @@ -34971,10 +34040,9 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORQload x [off] {sym} ptr mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 if l.Op != OpAMD64MOVQload { continue } @@ -34998,12 +34066,13 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORQconst [c] x) // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 // result: (BTCQconst [log2(c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { break } @@ -35016,7 +34085,6 @@ func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { // result: (XORQconst [c ^ d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpAMD64XORQconst { break } @@ -35031,7 +34099,6 @@ func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { // result: (XORQconst [c ^ 1< val ptr mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) v0.AddArg(val) @@ -35338,14 +34419,17 @@ func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool { } } func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicStore64 ptr val mem) // result: (Select1 (XCHGQ val ptr mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) v0.AddArg(val) @@ -35356,14 +34440,17 @@ func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { } } func rewriteValueAMD64_OpAtomicStore8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicStore8 ptr val mem) // result: (Select1 (XCHGB val ptr mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) v0.AddArg(val) @@ -35374,14 +34461,17 @@ func rewriteValueAMD64_OpAtomicStore8_0(v *Value) bool { } } func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicStorePtrNoWB ptr val mem) // result: (Select1 (XCHGQ val ptr mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) v0.AddArg(val) @@ -35392,12 +34482,13 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { } } func 
rewriteValueAMD64_OpBitLen16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen16 x) // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64BSRL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) v0.AuxInt = 1 @@ -35412,12 +34503,13 @@ func rewriteValueAMD64_OpBitLen16_0(v *Value) bool { } } func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen32 x) // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { - x := v.Args[0] + x := v_0 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) @@ -35434,13 +34526,14 @@ func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { } } func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpAMD64ADDQconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) @@ -35462,12 +34555,13 @@ func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { } } func rewriteValueAMD64_OpBitLen8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen8 x) // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64BSRL) v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) v0.AuxInt = 1 @@ -35482,10 +34576,11 @@ func rewriteValueAMD64_OpBitLen8_0(v *Value) bool { } } func rewriteValueAMD64_OpCeil_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ceil x) // result: (ROUNDSD [2] x) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64ROUNDSD) v.AuxInt = 2 v.AddArg(x) @@ -35493,15 +34588,16 @@ func rewriteValueAMD64_OpCeil_0(v *Value) bool { } } func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CondSelect x y (SETEQ cond)) // cond: (is64BitInt(t) || isPtr(t)) // result: (CMOVQEQ y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETEQ { break } @@ -35520,10 +34616,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQNE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETNE { break } @@ -35542,10 +34636,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQLT y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETL { break } @@ -35564,10 +34656,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQGT y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETG { break } @@ -35586,10 +34676,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQLE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETLE { break } @@ -35608,10 +34696,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQGE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 
+ y := v_1 if v_2.Op != OpAMD64SETGE { break } @@ -35630,10 +34716,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQHI y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETA { break } @@ -35652,10 +34736,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQCS y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETB { break } @@ -35674,10 +34756,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQCC y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETAE { break } @@ -35696,10 +34776,8 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { // result: (CMOVQLS y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETBE { break } @@ -35716,15 +34794,16 @@ func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { return false } func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CondSelect x y (SETEQF cond)) // cond: (is64BitInt(t) || isPtr(t)) // result: (CMOVQEQF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETEQF { break } @@ -35743,10 +34822,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVQNEF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETNEF { break } @@ -35765,10 +34842,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVQGTF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGF { break } @@ -35787,10 +34862,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVQGEF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGEF { break } @@ -35809,10 +34882,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVLEQ y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETEQ { break } @@ -35831,10 +34902,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVLNE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETNE { break } @@ -35853,10 +34922,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVLLT y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETL { break } @@ -35875,10 +34942,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVLGT y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETG { break } @@ -35897,10 +34962,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVLLE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op 
!= OpAMD64SETLE { break } @@ -35919,10 +34982,8 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { // result: (CMOVLGE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGE { break } @@ -35939,15 +35000,16 @@ func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { return false } func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CondSelect x y (SETA cond)) // cond: is32BitInt(t) // result: (CMOVLHI y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETA { break } @@ -35966,10 +35028,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLCS y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETB { break } @@ -35988,10 +35048,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLCC y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETAE { break } @@ -36010,10 +35068,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLLS y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETBE { break } @@ -36032,10 +35088,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLEQF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETEQF { break } @@ -36054,10 +35108,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLNEF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETNEF { break } @@ -36076,10 +35128,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLGTF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGF { break } @@ -36098,10 +35148,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVLGEF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGEF { break } @@ -36120,10 +35168,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVWEQ y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETEQ { break } @@ -36142,10 +35188,8 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { // result: (CMOVWNE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETNE { break } @@ -36162,15 +35206,16 @@ func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { return false } func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CondSelect x y (SETL cond)) // cond: is16BitInt(t) // result: (CMOVWLT y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETL { break } @@ -36189,10 +35234,8 @@ func rewriteValueAMD64_OpCondSelect_30(v 
*Value) bool { // result: (CMOVWGT y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETG { break } @@ -36211,10 +35254,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWLE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETLE { break } @@ -36233,10 +35274,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWGE y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGE { break } @@ -36255,10 +35294,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWHI y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETA { break } @@ -36277,10 +35314,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWCS y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETB { break } @@ -36299,10 +35334,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWCC y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETAE { break } @@ -36321,10 +35354,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWLS y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETBE { break } @@ -36343,10 +35374,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWEQF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETEQF { break } @@ -36365,10 +35394,8 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { // result: (CMOVWNEF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETNEF { break } @@ -36385,6 +35412,9 @@ func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { return false } func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CondSelect x y (SETGF cond)) @@ -36392,10 +35422,8 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CMOVWGTF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGF { break } @@ -36414,10 +35442,8 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CMOVWGEF y x cond) for { t := v.Type - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpAMD64SETGEF { break } @@ -36436,9 +35462,9 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CondSelect x y (MOVBQZX check)) for { t := v.Type - check := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + check := v_2 if !(!check.Type.IsFlags() && check.Type.Size() == 1) { break } @@ -36456,9 +35482,9 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CondSelect x y (MOVWQZX check)) for { t := v.Type - check := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y 
:= v_1 + check := v_2 if !(!check.Type.IsFlags() && check.Type.Size() == 2) { break } @@ -36476,9 +35502,9 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CondSelect x y (MOVLQZX check)) for { t := v.Type - check := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + check := v_2 if !(!check.Type.IsFlags() && check.Type.Size() == 4) { break } @@ -36496,9 +35522,9 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CMOVQNE y x (CMPQconst [0] check)) for { t := v.Type - check := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + check := v_2 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { break } @@ -36516,9 +35542,9 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CMOVLNE y x (CMPQconst [0] check)) for { t := v.Type - check := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + check := v_2 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { break } @@ -36536,9 +35562,9 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { // result: (CMOVWNE y x (CMPQconst [0] check)) for { t := v.Type - check := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + check := v_2 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { break } @@ -36554,12 +35580,13 @@ func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { return false } func rewriteValueAMD64_OpCtz16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) // result: (BSFL (BTSLconst [16] x)) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64BSFL) v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32) v0.AuxInt = 16 @@ -36569,12 +35596,13 @@ func rewriteValueAMD64_OpCtz16_0(v *Value) bool { } } func rewriteValueAMD64_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz32 x) // result: (Select0 (BSFQ (BTSQconst [32] x))) for { - x := v.Args[0] + x := v_0 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) @@ -36586,13 +35614,14 @@ func rewriteValueAMD64_OpCtz32_0(v *Value) bool { } } func rewriteValueAMD64_OpCtz64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz64 x) // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpAMD64CMOVQEQ) v0 := b.NewValue0(v.Pos, OpSelect0, t) v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) @@ -36611,12 +35640,13 @@ func rewriteValueAMD64_OpCtz64_0(v *Value) bool { } } func rewriteValueAMD64_OpCtz64NonZero_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz64NonZero x) // result: (Select0 (BSFQ x)) for { - x := v.Args[0] + x := v_0 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) v0.AddArg(x) @@ -36625,12 +35655,13 @@ func rewriteValueAMD64_OpCtz64NonZero_0(v *Value) bool { } } func rewriteValueAMD64_OpCtz8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) // result: (BSFL (BTSLconst [ 8] x)) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64BSFL) v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32) v0.AuxInt = 8 @@ -36640,14 +35671,16 @@ func rewriteValueAMD64_OpCtz8_0(v *Value) bool { } } func 
rewriteValueAMD64_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 [a] x y) // result: (Select0 (DIVW [a] x y)) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v0.AuxInt = a @@ -36658,13 +35691,15 @@ func rewriteValueAMD64_OpDiv16_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (Select0 (DIVWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v0.AddArg(x) @@ -36674,14 +35709,16 @@ func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32 [a] x y) // result: (Select0 (DIVL [a] x y)) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) v0.AuxInt = a @@ -36692,13 +35729,15 @@ func rewriteValueAMD64_OpDiv32_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32u x y) // result: (Select0 (DIVLU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg(x) @@ -36708,14 +35747,16 @@ func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div64 [a] x y) // result: (Select0 (DIVQ [a] x y)) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) v0.AuxInt = a @@ -36726,13 +35767,15 @@ func rewriteValueAMD64_OpDiv64_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div64u x y) // result: (Select0 (DIVQU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -36742,13 +35785,15 @@ func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) @@ -36762,13 +35807,15 @@ func rewriteValueAMD64_OpDiv8_0(v *Value) bool { } } func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, 
typ.UInt16)) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) @@ -36782,12 +35829,14 @@ func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { } } func rewriteValueAMD64_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq16 x y) // result: (SETEQ (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -36797,12 +35846,14 @@ func rewriteValueAMD64_OpEq16_0(v *Value) bool { } } func rewriteValueAMD64_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32 x y) // result: (SETEQ (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -36812,12 +35863,14 @@ func rewriteValueAMD64_OpEq32_0(v *Value) bool { } } func rewriteValueAMD64_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (SETEQF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -36827,12 +35880,14 @@ func rewriteValueAMD64_OpEq32F_0(v *Value) bool { } } func rewriteValueAMD64_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64 x y) // result: (SETEQ (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -36842,12 +35897,14 @@ func rewriteValueAMD64_OpEq64_0(v *Value) bool { } } func rewriteValueAMD64_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (SETEQF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -36857,12 +35914,14 @@ func rewriteValueAMD64_OpEq64F_0(v *Value) bool { } } func rewriteValueAMD64_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq8 x y) // result: (SETEQ (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -36872,12 +35931,14 @@ func rewriteValueAMD64_OpEq8_0(v *Value) bool { } } func rewriteValueAMD64_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqB x y) // result: (SETEQ (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -36887,12 +35948,14 @@ func rewriteValueAMD64_OpEqB_0(v *Value) bool { } } func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqPtr x y) // result: (SETEQ (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETEQ) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -36902,12 +35965,15 @@ func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { } } func rewriteValueAMD64_OpFMA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMA x y z) // result: (VFMADD231SD z x y) for { - z := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + z := v_2 v.reset(OpAMD64VFMADD231SD) v.AddArg(z) v.AddArg(x) @@ -36916,10 +35982,11 @@ func 
rewriteValueAMD64_OpFMA_0(v *Value) bool { } } func rewriteValueAMD64_OpFloor_0(v *Value) bool { + v_0 := v.Args[0] // match: (Floor x) // result: (ROUNDSD [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64ROUNDSD) v.AuxInt = 1 v.AddArg(x) @@ -36927,12 +35994,14 @@ func rewriteValueAMD64_OpFloor_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq16 x y) // result: (SETGE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -36942,12 +36011,14 @@ func rewriteValueAMD64_OpGeq16_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq16U x y) // result: (SETAE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -36957,12 +36028,14 @@ func rewriteValueAMD64_OpGeq16U_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32 x y) // result: (SETGE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -36972,12 +36045,14 @@ func rewriteValueAMD64_OpGeq32_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (SETGEF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -36987,12 +36062,14 @@ func rewriteValueAMD64_OpGeq32F_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32U x y) // result: (SETAE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37002,12 +36079,14 @@ func rewriteValueAMD64_OpGeq32U_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64 x y) // result: (SETGE (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37017,12 +36096,14 @@ func rewriteValueAMD64_OpGeq64_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (SETGEF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -37032,12 +36113,14 @@ func rewriteValueAMD64_OpGeq64F_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64U x y) // result: (SETAE (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37047,12 +36130,14 @@ func rewriteValueAMD64_OpGeq64U_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq8 x 
y) // result: (SETGE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37062,12 +36147,14 @@ func rewriteValueAMD64_OpGeq8_0(v *Value) bool { } } func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq8U x y) // result: (SETAE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETAE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37077,12 +36164,14 @@ func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater16 x y) // result: (SETG (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETG) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -37092,12 +36181,14 @@ func rewriteValueAMD64_OpGreater16_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater16U x y) // result: (SETA (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETA) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -37107,12 +36198,14 @@ func rewriteValueAMD64_OpGreater16U_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32 x y) // result: (SETG (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETG) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37122,12 +36215,14 @@ func rewriteValueAMD64_OpGreater32_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (SETGF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -37137,12 +36232,14 @@ func rewriteValueAMD64_OpGreater32F_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32U x y) // result: (SETA (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETA) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37152,12 +36249,14 @@ func rewriteValueAMD64_OpGreater32U_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64 x y) // result: (SETG (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETG) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37167,12 +36266,14 @@ func rewriteValueAMD64_OpGreater64_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (SETGF (UCOMISD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -37182,12 +36283,14 @@ func rewriteValueAMD64_OpGreater64F_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // 
match: (Greater64U x y) // result: (SETA (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETA) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37197,12 +36300,14 @@ func rewriteValueAMD64_OpGreater64U_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater8 x y) // result: (SETG (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETG) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37212,12 +36317,14 @@ func rewriteValueAMD64_OpGreater8_0(v *Value) bool { } } func rewriteValueAMD64_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater8U x y) // result: (SETA (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETA) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37227,12 +36334,14 @@ func rewriteValueAMD64_OpGreater8U_0(v *Value) bool { } } func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsInBounds idx len) // result: (SETB (CMPQ idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(idx) @@ -37242,11 +36351,12 @@ func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool { } } func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (IsNonNil p) // result: (SETNE (TESTQ p p)) for { - p := v.Args[0] + p := v_0 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) v0.AddArg(p) @@ -37256,12 +36366,14 @@ func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool { } } func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsSliceInBounds idx len) // result: (SETBE (CMPQ idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(idx) @@ -37271,12 +36383,14 @@ func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq16 x y) // result: (SETLE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -37286,12 +36400,14 @@ func rewriteValueAMD64_OpLeq16_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq16U x y) // result: (SETBE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -37301,12 +36417,14 @@ func rewriteValueAMD64_OpLeq16U_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32 x y) // result: (SETLE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37316,12 +36434,14 @@ func rewriteValueAMD64_OpLeq32_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) 
// result: (SETGEF (UCOMISS y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) v0.AddArg(y) @@ -37331,12 +36451,14 @@ func rewriteValueAMD64_OpLeq32F_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32U x y) // result: (SETBE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37346,12 +36468,14 @@ func rewriteValueAMD64_OpLeq32U_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64 x y) // result: (SETLE (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37361,12 +36485,14 @@ func rewriteValueAMD64_OpLeq64_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (SETGEF (UCOMISD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg(y) @@ -37376,12 +36502,14 @@ func rewriteValueAMD64_OpLeq64F_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64U x y) // result: (SETBE (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37391,12 +36519,14 @@ func rewriteValueAMD64_OpLeq64U_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq8 x y) // result: (SETLE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETLE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37406,12 +36536,14 @@ func rewriteValueAMD64_OpLeq8_0(v *Value) bool { } } func rewriteValueAMD64_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq8U x y) // result: (SETBE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETBE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37421,12 +36553,14 @@ func rewriteValueAMD64_OpLeq8U_0(v *Value) bool { } } func rewriteValueAMD64_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less16 x y) // result: (SETL (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -37436,12 +36570,14 @@ func rewriteValueAMD64_OpLess16_0(v *Value) bool { } } func rewriteValueAMD64_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less16U x y) // result: (SETB (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -37451,12 +36587,14 @@ func rewriteValueAMD64_OpLess16U_0(v *Value) bool { } } func rewriteValueAMD64_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32 x y) // result: (SETL (CMPL x y)) for { - y := v.Args[1] - x := 
v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37466,12 +36604,14 @@ func rewriteValueAMD64_OpLess32_0(v *Value) bool { } } func rewriteValueAMD64_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (SETGF (UCOMISS y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) v0.AddArg(y) @@ -37481,12 +36621,14 @@ func rewriteValueAMD64_OpLess32F_0(v *Value) bool { } } func rewriteValueAMD64_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32U x y) // result: (SETB (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -37496,12 +36638,14 @@ func rewriteValueAMD64_OpLess32U_0(v *Value) bool { } } func rewriteValueAMD64_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64 x y) // result: (SETL (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37511,12 +36655,14 @@ func rewriteValueAMD64_OpLess64_0(v *Value) bool { } } func rewriteValueAMD64_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (SETGF (UCOMISD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg(y) @@ -37526,12 +36672,14 @@ func rewriteValueAMD64_OpLess64F_0(v *Value) bool { } } func rewriteValueAMD64_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64U x y) // result: (SETB (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -37541,12 +36689,14 @@ func rewriteValueAMD64_OpLess64U_0(v *Value) bool { } } func rewriteValueAMD64_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less8 x y) // result: (SETL (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37556,12 +36706,14 @@ func rewriteValueAMD64_OpLess8_0(v *Value) bool { } } func rewriteValueAMD64_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less8U x y) // result: (SETB (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -37571,13 +36723,15 @@ func rewriteValueAMD64_OpLess8U_0(v *Value) bool { } } func rewriteValueAMD64_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: (is64BitInt(t) || isPtr(t)) // result: (MOVQload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) || isPtr(t)) { break } @@ -37591,8 +36745,8 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool { // result: (MOVLload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t)) { break } @@ -37606,8 +36760,8 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for 
{ t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t)) { break } @@ -37621,8 +36775,8 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean() || is8BitInt(t)) { break } @@ -37636,8 +36790,8 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool { // result: (MOVSSload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -37651,8 +36805,8 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool { // result: (MOVSDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -37664,12 +36818,12 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool { return false } func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (LEAQ {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpAMD64LEAQ) v.Aux = sym v.AddArg(base) @@ -37677,14 +36831,16 @@ func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool { } } func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x16 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37705,8 +36861,8 @@ func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37718,14 +36874,16 @@ func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x32 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37746,8 +36904,8 @@ func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37759,14 +36917,16 @@ func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x64 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37787,8 +36947,8 @@ func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37800,14 +36960,16 @@ func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x8 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37828,8 +36990,8 @@ func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { // cond: 
shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37841,14 +37003,16 @@ func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x16 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37869,8 +37033,8 @@ func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37882,14 +37046,16 @@ func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x32 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37910,8 +37076,8 @@ func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37923,14 +37089,16 @@ func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x64 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37951,8 +37119,8 @@ func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -37964,14 +37132,16 @@ func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x8 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -37992,8 +37162,8 @@ func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38005,14 +37175,16 @@ func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x16 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38033,8 +37205,8 @@ func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38046,14 +37218,16 @@ func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := 
v.Block // match: (Lsh64x32 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38074,8 +37248,8 @@ func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38087,14 +37261,16 @@ func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x64 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38115,8 +37291,8 @@ func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38128,14 +37304,16 @@ func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x8 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38156,8 +37334,8 @@ func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38169,14 +37347,16 @@ func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x16 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38197,8 +37377,8 @@ func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38210,14 +37390,16 @@ func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x32 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38238,8 +37420,8 @@ func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38251,14 +37433,16 @@ func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x64 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38279,8 +37463,8 @@ func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { // cond: shiftIsBounded(v) 
// result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38292,14 +37476,16 @@ func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x8 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -38320,8 +37506,8 @@ func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -38333,14 +37519,16 @@ func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 [a] x y) // result: (Select1 (DIVW [a] x y)) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v0.AuxInt = a @@ -38351,13 +37539,15 @@ func rewriteValueAMD64_OpMod16_0(v *Value) bool { } } func rewriteValueAMD64_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (Select1 (DIVWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v0.AddArg(x) @@ -38367,14 +37557,16 @@ func rewriteValueAMD64_OpMod16u_0(v *Value) bool { } } func rewriteValueAMD64_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 [a] x y) // result: (Select1 (DIVL [a] x y)) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) v0.AuxInt = a @@ -38385,13 +37577,15 @@ func rewriteValueAMD64_OpMod32_0(v *Value) bool { } } func rewriteValueAMD64_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (Select1 (DIVLU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg(x) @@ -38401,14 +37595,16 @@ func rewriteValueAMD64_OpMod32u_0(v *Value) bool { } } func rewriteValueAMD64_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod64 [a] x y) // result: (Select1 (DIVQ [a] x y)) for { a := v.AuxInt - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) v0.AuxInt = a @@ -38419,13 +37615,15 @@ func rewriteValueAMD64_OpMod64_0(v *Value) bool { } } func rewriteValueAMD64_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod64u x y) // result: (Select1 (DIVQU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -38435,13 +37633,15 @@ func rewriteValueAMD64_OpMod64u_0(v *Value) bool { } } func 
rewriteValueAMD64_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) @@ -38455,13 +37655,15 @@ func rewriteValueAMD64_OpMod8_0(v *Value) bool { } } func rewriteValueAMD64_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) @@ -38475,6 +37677,9 @@ func rewriteValueAMD64_OpMod8u_0(v *Value) bool { } } func rewriteValueAMD64_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -38484,7 +37689,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -38496,9 +37701,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) @@ -38514,9 +37719,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVWstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) @@ -38532,9 +37737,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVLstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) @@ -38550,9 +37755,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVQstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) @@ -38569,9 +37774,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(config.useSSE) { break } @@ -38591,9 +37796,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(!config.useSSE) { break } @@ -38621,9 +37826,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 32 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMove) v.AuxInt = 16 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) @@ -38649,9 +37854,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 48 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(config.useSSE) { break } @@ -38680,9 +37885,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { if v.AuxInt != 64 
{ break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(config.useSSE) { break } @@ -38707,6 +37912,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { return false } func rewriteValueAMD64_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -38716,9 +37924,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -38743,9 +37951,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 4 v.AddArg(dst) @@ -38770,9 +37978,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = 4 v.AddArg(dst) @@ -38797,9 +38005,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = 3 v.AddArg(dst) @@ -38824,9 +38032,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 9 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVBstore) v.AuxInt = 8 v.AddArg(dst) @@ -38851,9 +38059,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 10 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVWstore) v.AuxInt = 8 v.AddArg(dst) @@ -38878,9 +38086,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { if v.AuxInt != 12 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpAMD64MOVLstore) v.AuxInt = 8 v.AddArg(dst) @@ -38904,9 +38112,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s == 11 || s >= 13 && s <= 15) { break } @@ -38933,9 +38141,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 16 && s%16 != 0 && s%16 <= 8) { break } @@ -38964,9 +38172,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) { break } @@ -38993,6 +38201,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { return false } func rewriteValueAMD64_OpMove_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -39001,9 +38212,9 @@ func rewriteValueAMD64_OpMove_20(v *Value) bool { // result: (Move [s-s%16] 
(OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) { break } @@ -39041,9 +38252,9 @@ func rewriteValueAMD64_OpMove_20(v *Value) bool { // result: (DUFFCOPY [14*(64-s/16)] dst src mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) { break } @@ -39059,9 +38270,9 @@ func rewriteValueAMD64_OpMove_20(v *Value) bool { // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) { break } @@ -39077,12 +38288,13 @@ func rewriteValueAMD64_OpMove_20(v *Value) bool { return false } func rewriteValueAMD64_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg32F x) // result: (PXOR x (MOVSSconst [auxFrom32F(float32(math.Copysign(0, -1)))])) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64PXOR) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) @@ -39092,12 +38304,13 @@ func rewriteValueAMD64_OpNeg32F_0(v *Value) bool { } } func rewriteValueAMD64_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg64F x) // result: (PXOR x (MOVSDconst [auxFrom64F(math.Copysign(0, -1))])) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64PXOR) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) @@ -39107,12 +38320,14 @@ func rewriteValueAMD64_OpNeg64F_0(v *Value) bool { } } func rewriteValueAMD64_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq16 x y) // result: (SETNE (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg(x) @@ -39122,12 +38337,14 @@ func rewriteValueAMD64_OpNeq16_0(v *Value) bool { } } func rewriteValueAMD64_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32 x y) // result: (SETNE (CMPL x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg(x) @@ -39137,12 +38354,14 @@ func rewriteValueAMD64_OpNeq32_0(v *Value) bool { } } func rewriteValueAMD64_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32F x y) // result: (SETNEF (UCOMISS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) v0.AddArg(x) @@ -39152,12 +38371,14 @@ func rewriteValueAMD64_OpNeq32F_0(v *Value) bool { } } func rewriteValueAMD64_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64 x y) // result: (SETNE (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -39167,12 +38388,14 @@ func rewriteValueAMD64_OpNeq64_0(v *Value) bool { } } func rewriteValueAMD64_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64F x y) // result: (SETNEF (UCOMISD x y)) for { - y 
:= v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg(x) @@ -39182,12 +38405,14 @@ func rewriteValueAMD64_OpNeq64F_0(v *Value) bool { } } func rewriteValueAMD64_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq8 x y) // result: (SETNE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -39197,12 +38422,14 @@ func rewriteValueAMD64_OpNeq8_0(v *Value) bool { } } func rewriteValueAMD64_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqB x y) // result: (SETNE (CMPB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg(x) @@ -39212,12 +38439,14 @@ func rewriteValueAMD64_OpNeqB_0(v *Value) bool { } } func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqPtr x y) // result: (SETNE (CMPQ x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg(x) @@ -39227,10 +38456,11 @@ func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool { } } func rewriteValueAMD64_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORLconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64XORLconst) v.AuxInt = 1 v.AddArg(x) @@ -39238,6 +38468,7 @@ func rewriteValueAMD64_OpNot_0(v *Value) bool { } } func rewriteValueAMD64_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OffPtr [off] ptr) @@ -39245,7 +38476,7 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool { // result: (ADDQconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if !(is32Bit(off)) { break } @@ -39258,7 +38489,7 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool { // result: (ADDQ (MOVQconst [off]) ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpAMD64ADDQ) v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = off @@ -39268,14 +38499,17 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool { } } func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -39291,9 +38525,9 @@ func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -39309,9 +38543,9 @@ func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -39325,12 +38559,13 @@ func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueAMD64_OpPopCount16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount16 x) // result: (POPCNTL (MOVWQZX x)) for { - x := v.Args[0] + x 
:= v_0 v.reset(OpAMD64POPCNTL) v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) v0.AddArg(x) @@ -39339,12 +38574,13 @@ func rewriteValueAMD64_OpPopCount16_0(v *Value) bool { } } func rewriteValueAMD64_OpPopCount8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount8 x) // result: (POPCNTL (MOVBQZX x)) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64POPCNTL) v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) v0.AddArg(x) @@ -39353,10 +38589,11 @@ func rewriteValueAMD64_OpPopCount8_0(v *Value) bool { } } func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool { + v_0 := v.Args[0] // match: (RoundToEven x) // result: (ROUNDSD [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64ROUNDSD) v.AuxInt = 0 v.AddArg(x) @@ -39364,14 +38601,16 @@ func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool { } } func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux16 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39392,8 +38631,8 @@ func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39405,14 +38644,16 @@ func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux32 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39433,8 +38674,8 @@ func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39446,14 +38687,16 @@ func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux64 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39474,8 +38717,8 @@ func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39487,14 +38730,16 @@ func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux8 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39515,8 +38760,8 @@ func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39528,14 +38773,16 @@ func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := 
v.Block // match: (Rsh16x16 x y) // cond: !shiftIsBounded(v) // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39559,8 +38806,8 @@ func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39572,14 +38819,16 @@ func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x32 x y) // cond: !shiftIsBounded(v) // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39603,8 +38852,8 @@ func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39616,14 +38865,16 @@ func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x64 x y) // cond: !shiftIsBounded(v) // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39647,8 +38898,8 @@ func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39660,14 +38911,16 @@ func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x8 x y) // cond: !shiftIsBounded(v) // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39691,8 +38944,8 @@ func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39704,14 +38957,16 @@ func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux16 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39732,8 +38987,8 @@ func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39745,14 +39000,16 @@ func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux32 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39773,8 +39030,8 @@ func rewriteValueAMD64_OpRsh32Ux32_0(v 
*Value) bool { // cond: shiftIsBounded(v) // result: (SHRL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39786,14 +39043,16 @@ func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux64 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39814,8 +39073,8 @@ func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39827,14 +39086,16 @@ func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux8 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39855,8 +39116,8 @@ func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39868,14 +39129,16 @@ func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x16 x y) // cond: !shiftIsBounded(v) // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39899,8 +39162,8 @@ func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39912,14 +39175,16 @@ func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x32 x y) // cond: !shiftIsBounded(v) // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39943,8 +39208,8 @@ func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -39956,14 +39221,16 @@ func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x64 x y) // cond: !shiftIsBounded(v) // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -39987,8 +39254,8 @@ func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40000,14 +39267,16 @@ func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh32x8_0(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x8 x y) // cond: !shiftIsBounded(v) // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40031,8 +39300,8 @@ func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40044,14 +39313,16 @@ func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux16 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40072,8 +39343,8 @@ func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40085,14 +39356,16 @@ func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux32 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40113,8 +39386,8 @@ func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40126,14 +39399,16 @@ func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux64 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40154,8 +39429,8 @@ func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40167,14 +39442,16 @@ func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux8 x y) // cond: !shiftIsBounded(v) // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40195,8 +39472,8 @@ func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40208,14 +39485,16 @@ func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x16 x y) // cond: !shiftIsBounded(v) // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40239,8 
+39518,8 @@ func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40252,14 +39531,16 @@ func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x32 x y) // cond: !shiftIsBounded(v) // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40283,8 +39564,8 @@ func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40296,14 +39577,16 @@ func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 x y) // cond: !shiftIsBounded(v) // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40327,8 +39610,8 @@ func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40340,14 +39623,16 @@ func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x8 x y) // cond: !shiftIsBounded(v) // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40371,8 +39656,8 @@ func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARQ x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40384,14 +39669,16 @@ func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux16 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40412,8 +39699,8 @@ func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40425,14 +39712,16 @@ func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux32 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40453,8 +39742,8 @@ func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40466,14 +39755,16 @@ func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { return false } func 
rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux64 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40494,8 +39785,8 @@ func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40507,14 +39798,16 @@ func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux8 x y) // cond: !shiftIsBounded(v) // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40535,8 +39828,8 @@ func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SHRB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40548,14 +39841,16 @@ func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x16 x y) // cond: !shiftIsBounded(v) // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40579,8 +39874,8 @@ func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40592,14 +39887,16 @@ func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x32 x y) // cond: !shiftIsBounded(v) // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40623,8 +39920,8 @@ func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40636,14 +39933,16 @@ func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x64 x y) // cond: !shiftIsBounded(v) // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } @@ -40667,8 +39966,8 @@ func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40680,14 +39979,16 @@ func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { return false } func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x8 x y) // cond: !shiftIsBounded(v) // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(!shiftIsBounded(v)) { break } 
@@ -40711,8 +40012,8 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SARB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -40724,12 +40025,12 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { return false } func rewriteValueAMD64_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select0 (Mul64uover x y)) // result: (Select0 (MULQU x y)) for { - v_0 := v.Args[0] if v_0.Op != OpMul64uover { break } @@ -40746,7 +40047,6 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { // match: (Select0 (Mul32uover x y)) // result: (Select0 (MULLU x y)) for { - v_0 := v.Args[0] if v_0.Op != OpMul32uover { break } @@ -40763,7 +40063,6 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { // match: (Select0 (Add64carry x y c)) // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64carry { break } @@ -40786,7 +40085,6 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { // match: (Select0 (Sub64borrow x y c)) // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) for { - v_0 := v.Args[0] if v_0.Op != OpSub64borrow { break } @@ -40810,7 +40108,6 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { // result: (ADDL val (Select0 tuple)) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpAMD64AddTupleFirst32 { break } @@ -40827,7 +40124,6 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { // result: (ADDQ val (Select0 tuple)) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpAMD64AddTupleFirst64 { break } @@ -40843,12 +40139,12 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { return false } func rewriteValueAMD64_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select1 (Mul64uover x y)) // result: (SETO (Select1 (MULQU x y))) for { - v_0 := v.Args[0] if v_0.Op != OpMul64uover { break } @@ -40866,7 +40162,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (Mul32uover x y)) // result: (SETO (Select1 (MULLU x y))) for { - v_0 := v.Args[0] if v_0.Op != OpMul32uover { break } @@ -40884,7 +40179,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (Add64carry x y c)) // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64carry { break } @@ -40911,7 +40205,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (Sub64borrow x y c)) // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) for { - v_0 := v.Args[0] if v_0.Op != OpSub64borrow { break } @@ -40938,7 +40231,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (NEGLflags (MOVQconst [0]))) // result: (FlagEQ) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64NEGLflags { break } @@ -40952,7 +40244,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpAMD64NEGLflags { break } @@ -40973,7 +40264,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (AddTupleFirst32 _ tuple)) // result: (Select1 tuple) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64AddTupleFirst32 { break } @@ -40985,7 +40275,6 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { // match: (Select1 (AddTupleFirst64 _ tuple)) // result: (Select1 tuple) for { - v_0 := v.Args[0] if v_0.Op != OpAMD64AddTupleFirst64 { break } @@ 
-40997,12 +40286,13 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { return false } func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SARQconst (NEGQ x) [63]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpAMD64SARQconst) v.AuxInt = 63 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) @@ -41012,14 +40302,17 @@ func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { } } func rewriteValueAMD64_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) // result: (MOVSDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -41034,9 +40327,9 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool { // result: (MOVSSstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -41051,9 +40344,9 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool { // result: (MOVQstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8) { break } @@ -41068,9 +40361,9 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool { // result: (MOVLstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4) { break } @@ -41085,9 +40378,9 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -41102,9 +40395,9 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool { // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -41117,10 +40410,11 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool { return false } func rewriteValueAMD64_OpTrunc_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc x) // result: (ROUNDSD [3] x) for { - x := v.Args[0] + x := v_0 v.reset(OpAMD64ROUNDSD) v.AuxInt = 3 v.AddArg(x) @@ -41128,6 +40422,8 @@ func rewriteValueAMD64_OpTrunc_0(v *Value) bool { } } func rewriteValueAMD64_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (Zero [0] _ mem) @@ -41136,7 +40432,7 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -41148,8 +40444,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -41162,8 +40458,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVWstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -41176,8 +40472,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 
+ mem := v_1 v.reset(OpAMD64MOVLstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -41190,8 +40486,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVQstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -41204,8 +40500,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) v.AddArg(destptr) @@ -41222,8 +40518,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -41240,8 +40536,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVWstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -41258,8 +40554,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpAMD64MOVLstoreconst) v.AuxInt = makeValAndOff(0, 3) v.AddArg(destptr) @@ -41275,8 +40571,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (MOVQstoreconst [0] destptr mem)) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s%8 != 0 && s > 8 && !config.useSSE) { break } @@ -41296,6 +40592,8 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { return false } func rewriteValueAMD64_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (Zero [16] destptr mem) @@ -41305,8 +40603,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(!config.useSSE) { break } @@ -41327,8 +40625,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 24 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(!config.useSSE) { break } @@ -41353,8 +40651,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 32 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(!config.useSSE) { break } @@ -41381,8 +40679,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { // result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem)) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s > 8 && s < 16 && config.useSSE) { break } @@ -41401,8 +40699,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem)) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) { break } @@ -41426,8 +40724,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVQstoreconst [0] destptr mem)) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) { break } @@ -41451,8 +40749,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[1] - destptr 
:= v.Args[0] + destptr := v_0 + mem := v_1 if !(config.useSSE) { break } @@ -41471,8 +40769,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 32 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(config.useSSE) { break } @@ -41500,8 +40798,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 48 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(config.useSSE) { break } @@ -41538,8 +40836,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { if v.AuxInt != 64 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(config.useSSE) { break } @@ -41581,6 +40879,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { return false } func rewriteValueAMD64_OpZero_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -41589,8 +40889,8 @@ func rewriteValueAMD64_OpZero_20(v *Value) bool { // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) { break } @@ -41608,8 +40908,8 @@ func rewriteValueAMD64_OpZero_20(v *Value) bool { // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) { break } @@ -41634,8 +40934,9 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLL { continue } @@ -41644,7 +40945,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg(x) @@ -41659,8 +40960,9 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLQ { continue } @@ -41669,7 +40971,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 b.Reset(BlockAMD64UGE) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg(x) @@ -41719,13 +41021,14 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64MOVQconst { continue } c := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } @@ -41744,8 +41047,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + 
z1 := v_0_0 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { continue } @@ -41754,7 +41059,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -41773,8 +41078,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { continue } @@ -41783,7 +41090,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -41802,8 +41109,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } @@ -41812,7 +41121,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -41831,8 +41140,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } @@ -41841,7 +41152,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -41860,13 +41171,15 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -41885,13 +41198,15 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -42468,8 +41783,9 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLL { continue } @@ -42478,7 +41794,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) v0.AddArg(x) @@ -42493,8 +41809,9 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = 
v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64SHLQ { continue } @@ -42503,7 +41820,7 @@ func rewriteBlockAMD64(b *Block) bool { if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { continue } - y := v_0.Args[1^_i0] + y := v_0_1 b.Reset(BlockAMD64ULT) v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) v0.AddArg(x) @@ -42553,13 +41870,14 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpAMD64MOVQconst { continue } c := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(isUint64PowerOfTwo(c)) { continue } @@ -42578,8 +41896,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { continue } @@ -42588,7 +41908,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -42607,8 +41927,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { continue } @@ -42617,7 +41939,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -42636,8 +41958,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } @@ -42646,7 +41970,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -42665,8 +41989,10 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } @@ -42675,7 +42001,7 @@ func rewriteBlockAMD64(b *Block) bool { continue } x := z1_0.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } @@ -42694,13 +42020,15 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 
:= v_0_1 if !(z1 == z2) { continue } @@ -42719,13 +42047,15 @@ func rewriteBlockAMD64(b *Block) bool { for b.Controls[0].Op == OpAMD64TESTL { v_0 := b.Controls[0] _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - z1 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { continue } x := z1.Args[0] - z2 := v_0.Args[1^_i0] + z2 := v_0_1 if !(z1 == z2) { continue } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go index a7aa51268d..ec7d2270b3 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go @@ -25,6 +25,8 @@ func rewriteValueAMD64splitload(v *Value) bool { return false } func rewriteValueAMD64splitload_OpAMD64CMPBconstload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPBconstload {sym} [vo] ptr mem) @@ -32,8 +34,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload_0(v *Value) bool { for { vo := v.AuxInt sym := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpAMD64CMPBconst) v.AuxInt = valOnly(vo) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) @@ -46,6 +48,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload_0(v *Value) bool { } } func rewriteValueAMD64splitload_OpAMD64CMPBload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPBload {sym} [off] ptr x mem) @@ -53,9 +58,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - x := v.Args[1] + ptr := v_0 + x := v_1 + mem := v_2 v.reset(OpAMD64CMPB) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) v0.AuxInt = off @@ -68,6 +73,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPBload_0(v *Value) bool { } } func rewriteValueAMD64splitload_OpAMD64CMPLconstload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPLconstload {sym} [vo] ptr mem) @@ -75,8 +82,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload_0(v *Value) bool { for { vo := v.AuxInt sym := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpAMD64CMPLconst) v.AuxInt = valOnly(vo) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) @@ -89,6 +96,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload_0(v *Value) bool { } } func rewriteValueAMD64splitload_OpAMD64CMPLload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPLload {sym} [off] ptr x mem) @@ -96,9 +106,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - x := v.Args[1] + ptr := v_0 + x := v_1 + mem := v_2 v.reset(OpAMD64CMPL) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = off @@ -111,6 +121,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPLload_0(v *Value) bool { } } func rewriteValueAMD64splitload_OpAMD64CMPQconstload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (CMPQconstload {sym} [vo] ptr mem) @@ -118,8 +130,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload_0(v *Value) bool { for { vo := v.AuxInt sym := v.Aux - 
mem := v.Args[1]
-		ptr := v.Args[0]
+		ptr := v_0
+		mem := v_1
 		v.reset(OpAMD64CMPQconst)
 		v.AuxInt = valOnly(vo)
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
@@ -132,6 +144,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64splitload_OpAMD64CMPQload_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPQload {sym} [off] ptr x mem)
@@ -139,9 +154,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQload_0(v *Value) bool {
 	for {
 		off := v.AuxInt
 		sym := v.Aux
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		x := v.Args[1]
+		ptr := v_0
+		x := v_1
+		mem := v_2
 		v.reset(OpAMD64CMPQ)
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
 		v0.AuxInt = off
@@ -154,6 +169,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPQload_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64splitload_OpAMD64CMPWconstload_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPWconstload {sym} [vo] ptr mem)
@@ -161,8 +178,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload_0(v *Value) bool {
 	for {
 		vo := v.AuxInt
 		sym := v.Aux
-		mem := v.Args[1]
-		ptr := v.Args[0]
+		ptr := v_0
+		mem := v_1
 		v.reset(OpAMD64CMPWconst)
 		v.AuxInt = valOnly(vo)
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
@@ -175,6 +192,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64splitload_OpAMD64CMPWload_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPWload {sym} [off] ptr x mem)
@@ -182,9 +202,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPWload_0(v *Value) bool {
 	for {
 		off := v.AuxInt
 		sym := v.Aux
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		x := v.Args[1]
+		ptr := v_0
+		x := v_1
+		mem := v_2
 		v.reset(OpAMD64CMPW)
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
 		v0.AuxInt = off
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index b392bad1b1..76befe077b 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -840,17 +840,19 @@ func rewriteValueARM(v *Value) bool {
 	return false
 }
 func rewriteValueARM_OpARMADC_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (ADC (MOVWconst [c]) x flags)
 	// result: (ADCconst [c] x flags)
 	for {
-		flags := v.Args[2]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0 := v.Args[_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpARMMOVWconst {
 				continue
 			}
 			c := v_0.AuxInt
-			x := v.Args[1^_i0]
+			x := v_1
+			flags := v_2
 			v.reset(OpARMADCconst)
 			v.AuxInt = c
 			v.AddArg(x)
@@ -862,15 +864,14 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool {
 	// match: (ADC x (SLLconst [c] y) flags)
 	// result: (ADCshiftLL x y [c] flags)
 	for {
-		flags := v.Args[2]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			x := v.Args[_i0]
-			v_1 := v.Args[1^_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
 			if v_1.Op != OpARMSLLconst {
 				continue
 			}
 			c := v_1.AuxInt
 			y := v_1.Args[0]
+			flags := v_2
 			v.reset(OpARMADCshiftLL)
 			v.AuxInt = c
 			v.AddArg(x)
@@ -883,15 +884,14 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool {
 	// match: (ADC x (SRLconst [c] y) flags)
 	// result: (ADCshiftRL x y [c] flags)
 	for {
-		flags := v.Args[2]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			x := v.Args[_i0]
-			v_1 := v.Args[1^_i0]
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
 			if v_1.Op !=
OpARMSRLconst { continue } c := v_1.AuxInt y := v_1.Args[0] + flags := v_2 v.reset(OpARMADCshiftRL) v.AuxInt = c v.AddArg(x) @@ -904,15 +904,14 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool { // match: (ADC x (SRAconst [c] y) flags) // result: (ADCshiftRA x y [c] flags) for { - flags := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } c := v_1.AuxInt y := v_1.Args[0] + flags := v_2 v.reset(OpARMADCshiftRA) v.AuxInt = c v.AddArg(x) @@ -925,15 +924,14 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool { // match: (ADC x (SLL y z) flags) // result: (ADCshiftLLreg x y z flags) for { - flags := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } z := v_1.Args[1] y := v_1.Args[0] + flags := v_2 v.reset(OpARMADCshiftLLreg) v.AddArg(x) v.AddArg(y) @@ -946,15 +944,14 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool { // match: (ADC x (SRL y z) flags) // result: (ADCshiftRLreg x y z flags) for { - flags := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } z := v_1.Args[1] y := v_1.Args[0] + flags := v_2 v.reset(OpARMADCshiftRLreg) v.AddArg(x) v.AddArg(y) @@ -967,15 +964,14 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool { // match: (ADC x (SRA y z) flags) // result: (ADCshiftRAreg x y z flags) for { - flags := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } z := v_1.Args[1] y := v_1.Args[0] + flags := v_2 v.reset(OpARMADCshiftRAreg) v.AddArg(x) v.AddArg(y) @@ -988,17 +984,18 @@ func rewriteValueARM_OpARMADC_0(v *Value) bool { return false } func rewriteValueARM_OpARMADCconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADCconst [c] (ADDconst [d] x) flags) // result: (ADCconst [int64(int32(c+d))] x flags) for { c := v.AuxInt - flags := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } d := v_0.AuxInt x := v_0.Args[0] + flags := v_1 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c + d)) v.AddArg(x) @@ -1009,13 +1006,12 @@ func rewriteValueARM_OpARMADCconst_0(v *Value) bool { // result: (ADCconst [int64(int32(c-d))] x flags) for { c := v.AuxInt - flags := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } d := v_0.AuxInt x := v_0.Args[0] + flags := v_1 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c - d)) v.AddArg(x) @@ -1025,18 +1021,20 @@ func rewriteValueARM_OpARMADCconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMADCshiftLL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADCshiftLL (MOVWconst [c]) x [d] flags) // result: (ADCconst [c] (SLLconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -1050,13 +1048,12 @@ func rewriteValueARM_OpARMADCshiftLL_0(v *Value) bool { // result: (ADCconst x [int64(int32(uint32(c)< x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { 
break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -1090,14 +1090,13 @@ func rewriteValueARM_OpARMADCshiftLLreg_0(v *Value) bool { // match: (ADCshiftLLreg x y (MOVWconst [c]) flags) // result: (ADCshiftLL x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMADCshiftLL) v.AuxInt = c v.AddArg(x) @@ -1108,18 +1107,20 @@ func rewriteValueARM_OpARMADCshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADCshiftRA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADCshiftRA (MOVWconst [c]) x [d] flags) // result: (ADCconst [c] (SRAconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -1133,13 +1134,12 @@ func rewriteValueARM_OpARMADCshiftRA_0(v *Value) bool { // result: (ADCconst x [int64(int32(c)>>uint64(d))] flags) for { d := v.AuxInt - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) @@ -1149,18 +1149,21 @@ func rewriteValueARM_OpARMADCshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMADCshiftRAreg_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADCshiftRAreg (MOVWconst [c]) x y flags) // result: (ADCconst [c] (SRA x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -1173,14 +1176,13 @@ func rewriteValueARM_OpARMADCshiftRAreg_0(v *Value) bool { // match: (ADCshiftRAreg x y (MOVWconst [c]) flags) // result: (ADCshiftRA x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMADCshiftRA) v.AuxInt = c v.AddArg(x) @@ -1191,18 +1193,20 @@ func rewriteValueARM_OpARMADCshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADCshiftRL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADCshiftRL (MOVWconst [c]) x [d] flags) // result: (ADCconst [c] (SRLconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -1216,13 +1220,12 @@ func rewriteValueARM_OpARMADCshiftRL_0(v *Value) bool { // result: (ADCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) for { d := v.AuxInt - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMADCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) @@ -1232,18 +1235,21 @@ func rewriteValueARM_OpARMADCshiftRL_0(v *Value) bool { return false } func 
rewriteValueARM_OpARMADCshiftRLreg_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADCshiftRLreg (MOVWconst [c]) x y flags) // result: (ADCconst [c] (SRL x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMADCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -1256,14 +1262,13 @@ func rewriteValueARM_OpARMADCshiftRLreg_0(v *Value) bool { // match: (ADCshiftRLreg x y (MOVWconst [c]) flags) // result: (ADCshiftRL x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMADCshiftRL) v.AuxInt = c v.AddArg(x) @@ -1274,14 +1279,14 @@ func rewriteValueARM_OpARMADCshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADD x (MOVWconst [c])) // result: (ADDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -1296,10 +1301,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (SLLconst [c] y)) // result: (ADDshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -1316,10 +1319,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (SRLconst [c] y)) // result: (ADDshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -1336,10 +1337,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (SRAconst [c] y)) // result: (ADDshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -1356,10 +1355,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (SLL y z)) // result: (ADDshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -1376,10 +1373,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (SRL y z)) // result: (ADDshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -1396,10 +1391,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (SRA y z)) // result: (ADDshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -1416,10 +1409,8 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD x (RSBconst [0] y)) // result: (SUB x y) for { - _ = 
v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { continue } @@ -1435,15 +1426,12 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // result: (RSBconst [c+d] (ADD x y)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMRSBconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARMRSBconst { continue } @@ -1462,15 +1450,13 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { // match: (ADD (MUL x y) a) // result: (MULA x y a) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMMUL { continue } y := v_0.Args[1] x := v_0.Args[0] - a := v.Args[1^_i0] + a := v_1 v.reset(OpARMMULA) v.AddArg(x) v.AddArg(y) @@ -1482,14 +1468,14 @@ func rewriteValueARM_OpARMADD_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDD a (MULD x y)) // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULAD a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARMMULD { continue } @@ -1510,10 +1496,8 @@ func rewriteValueARM_OpARMADDD_0(v *Value) bool { // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULSD a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARMNMULD { continue } @@ -1533,14 +1517,14 @@ func rewriteValueARM_OpARMADDD_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDF_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDF a (MULF x y)) // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULAF a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARMMULF { continue } @@ -1561,10 +1545,8 @@ func rewriteValueARM_OpARMADDF_0(v *Value) bool { // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULSF a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARMNMULF { continue } @@ -1584,13 +1566,13 @@ func rewriteValueARM_OpARMADDF_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDS x (MOVWconst [c])) // result: (ADDSconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -1605,10 +1587,8 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: (ADDS x (SLLconst [c] y)) // result: (ADDSshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -1625,10 +1605,8 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: 
(ADDS x (SRLconst [c] y)) // result: (ADDSshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -1645,10 +1623,8 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: (ADDS x (SRAconst [c] y)) // result: (ADDSshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -1665,10 +1641,8 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: (ADDS x (SLL y z)) // result: (ADDSshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -1685,10 +1659,8 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: (ADDS x (SRL y z)) // result: (ADDSshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -1705,10 +1677,8 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: (ADDS x (SRA y z)) // result: (ADDSshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -1725,17 +1695,18 @@ func rewriteValueARM_OpARMADDS_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDSshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDSshiftLL (MOVWconst [c]) x [d]) // result: (ADDSconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -1748,9 +1719,7 @@ func rewriteValueARM_OpARMADDSshiftLL_0(v *Value) bool { // result: (ADDSconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -1785,10 +1756,8 @@ func rewriteValueARM_OpARMADDSshiftLLreg_0(v *Value) bool { // match: (ADDSshiftLLreg x y (MOVWconst [c])) // result: (ADDSshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -1802,17 +1771,18 @@ func rewriteValueARM_OpARMADDSshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDSshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDSshiftRA (MOVWconst [c]) x [d]) // result: (ADDSconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -1825,9 +1795,7 @@ func rewriteValueARM_OpARMADDSshiftRA_0(v *Value) bool { // result: (ADDSconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -1840,17 
+1808,19 @@ func rewriteValueARM_OpARMADDSshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDSshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDSshiftRAreg (MOVWconst [c]) x y) // result: (ADDSconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -1862,10 +1832,8 @@ func rewriteValueARM_OpARMADDSshiftRAreg_0(v *Value) bool { // match: (ADDSshiftRAreg x y (MOVWconst [c])) // result: (ADDSshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -1879,17 +1847,18 @@ func rewriteValueARM_OpARMADDSshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDSshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDSshiftRL (MOVWconst [c]) x [d]) // result: (ADDSconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -1902,9 +1871,7 @@ func rewriteValueARM_OpARMADDSshiftRL_0(v *Value) bool { // result: (ADDSconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -1917,17 +1884,19 @@ func rewriteValueARM_OpARMADDSshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDSshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDSshiftRLreg (MOVWconst [c]) x y) // result: (ADDSconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMADDSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -1939,10 +1908,8 @@ func rewriteValueARM_OpARMADDSshiftRLreg_0(v *Value) bool { // match: (ADDSshiftRLreg x y (MOVWconst [c])) // result: (ADDSshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -1956,11 +1923,11 @@ func rewriteValueARM_OpARMADDSshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) // result: (MOVWaddr [off1+off2] {sym} ptr) for { off1 := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } @@ -1979,7 +1946,7 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -1990,7 +1957,7 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { // result: (SUBconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) { break } @@ -2004,7 +1971,7 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { // result: (SUBconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } @@ -2017,7 +1984,6 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { // result: (MOVWconst [int64(int32(c+d))]) 
for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -2030,7 +1996,6 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { // result: (ADDconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } @@ -2045,7 +2010,6 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { // result: (ADDconst [int64(int32(c-d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } @@ -2060,7 +2024,6 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { // result: (RSBconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMRSBconst { break } @@ -2074,18 +2037,19 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDshiftLL (MOVWconst [c]) x [d]) // result: (ADDconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -2098,9 +2062,7 @@ func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { // result: (ADDconst x [int64(int32(uint32(c)< [8] (BFXU [armBFAuxInt(8, 8)] x) x) // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { break } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) || x != v_0.Args[0] { + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARMREV16) @@ -2143,16 +2106,15 @@ func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { - break - } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 || x != v_0_0.Args[0] || !(objabi.GOARM >= 6) { + if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 { + break + } + x := v_0_0.Args[0] + if x != v_1 || !(objabi.GOARM >= 6) { break } v.reset(OpARMREV16) @@ -2162,17 +2124,19 @@ func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDshiftLLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftLLreg (MOVWconst [c]) x y) // result: (ADDconst [c] (SLL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -2184,10 +2148,8 @@ func rewriteValueARM_OpARMADDshiftLLreg_0(v *Value) bool { // match: (ADDshiftLLreg x y (MOVWconst [c])) // result: (ADDshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -2201,17 +2163,18 @@ func rewriteValueARM_OpARMADDshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftRA (MOVWconst [c]) x [d]) // result: (ADDconst 
[c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -2224,9 +2187,7 @@ func rewriteValueARM_OpARMADDshiftRA_0(v *Value) bool { // result: (ADDconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -2239,17 +2200,19 @@ func rewriteValueARM_OpARMADDshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftRAreg (MOVWconst [c]) x y) // result: (ADDconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -2261,10 +2224,8 @@ func rewriteValueARM_OpARMADDshiftRAreg_0(v *Value) bool { // match: (ADDshiftRAreg x y (MOVWconst [c])) // result: (ADDshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -2278,17 +2239,18 @@ func rewriteValueARM_OpARMADDshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftRL (MOVWconst [c]) x [d]) // result: (ADDconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -2301,9 +2263,7 @@ func rewriteValueARM_OpARMADDshiftRL_0(v *Value) bool { // result: (ADDconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -2317,9 +2277,11 @@ func rewriteValueARM_OpARMADDshiftRL_0(v *Value) bool { // result: (SRRconst [ c] x) for { c := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst || v_0.AuxInt != 32-c || x != v_0.Args[0] { + if v_0.Op != OpARMSLLconst || v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARMSRRconst) @@ -2330,17 +2292,19 @@ func rewriteValueARM_OpARMADDshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMADDshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftRLreg (MOVWconst [c]) x y) // result: (ADDconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -2352,10 +2316,8 @@ func rewriteValueARM_OpARMADDshiftRLreg_0(v *Value) bool { // match: (ADDshiftRLreg x y (MOVWconst [c])) // result: (ADDshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -2369,13 +2331,13 @@ func rewriteValueARM_OpARMADDshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMAND_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AND x (MOVWconst [c])) // result: (ANDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := 
v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -2390,10 +2352,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (SLLconst [c] y)) // result: (ANDshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -2410,10 +2370,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (SRLconst [c] y)) // result: (ANDshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -2430,10 +2388,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (SRAconst [c] y)) // result: (ANDshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -2450,10 +2406,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (SLL y z)) // result: (ANDshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -2470,10 +2424,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (SRL y z)) // result: (ANDshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -2490,10 +2442,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (SRA y z)) // result: (ANDshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -2510,8 +2460,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -2522,10 +2472,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (MVN y)) // result: (BIC x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMVN { continue } @@ -2540,10 +2488,8 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { // match: (AND x (MVNshiftLL y [c])) // result: (BICshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMVNshiftLL { continue } @@ -2560,13 +2506,13 @@ func rewriteValueARM_OpARMAND_0(v *Value) bool { return false } func rewriteValueARM_OpARMAND_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AND x (MVNshiftRL y [c])) // result: (BICshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMVNshiftRL { continue } @@ -2583,10 +2529,8 
@@ func rewriteValueARM_OpARMAND_10(v *Value) bool { // match: (AND x (MVNshiftRA y [c])) // result: (BICshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMVNshiftRA { continue } @@ -2603,6 +2547,7 @@ func rewriteValueARM_OpARMAND_10(v *Value) bool { return false } func rewriteValueARM_OpARMANDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDconst [0] _) // result: (MOVWconst [0]) for { @@ -2618,7 +2563,7 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == -1) { break } @@ -2632,7 +2577,7 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { // result: (BICconst [int64(int32(^uint32(c)))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) { break } @@ -2646,7 +2591,7 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { // result: (BICconst [int64(int32(^uint32(c)))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } @@ -2659,7 +2604,6 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { // result: (MOVWconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -2672,7 +2616,6 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { // result: (ANDconst [c&d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMANDconst { break } @@ -2686,17 +2629,18 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMANDshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftLL (MOVWconst [c]) x [d]) // result: (ANDconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -2709,9 +2653,7 @@ func rewriteValueARM_OpARMANDshiftLL_0(v *Value) bool { // result: (ANDconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -2766,10 +2709,8 @@ func rewriteValueARM_OpARMANDshiftLLreg_0(v *Value) bool { // match: (ANDshiftLLreg x y (MOVWconst [c])) // result: (ANDshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -2783,17 +2724,18 @@ func rewriteValueARM_OpARMANDshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMANDshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftRA (MOVWconst [c]) x [d]) // result: (ANDconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -2806,9 +2748,7 @@ func rewriteValueARM_OpARMANDshiftRA_0(v *Value) bool { // result: (ANDconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -2823,9 +2763,8 @@ func rewriteValueARM_OpARMANDshiftRA_0(v 
*Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARMSRAconst { break } @@ -2841,17 +2780,19 @@ func rewriteValueARM_OpARMANDshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMANDshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftRAreg (MOVWconst [c]) x y) // result: (ANDconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -2863,10 +2804,8 @@ func rewriteValueARM_OpARMANDshiftRAreg_0(v *Value) bool { // match: (ANDshiftRAreg x y (MOVWconst [c])) // result: (ANDshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -2880,17 +2819,18 @@ func rewriteValueARM_OpARMANDshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMANDshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftRL (MOVWconst [c]) x [d]) // result: (ANDconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -2903,9 +2843,7 @@ func rewriteValueARM_OpARMANDshiftRL_0(v *Value) bool { // result: (ANDconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -2920,9 +2858,8 @@ func rewriteValueARM_OpARMANDshiftRL_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARMSRLconst { break } @@ -2938,17 +2875,19 @@ func rewriteValueARM_OpARMANDshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMANDshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftRLreg (MOVWconst [c]) x y) // result: (ANDconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -2960,10 +2899,8 @@ func rewriteValueARM_OpARMANDshiftRLreg_0(v *Value) bool { // match: (ANDshiftRLreg x y (MOVWconst [c])) // result: (ANDshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -2977,11 +2914,11 @@ func rewriteValueARM_OpARMANDshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMBFX_0(v *Value) bool { + v_0 := v.Args[0] // match: (BFX [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -2993,11 +2930,11 @@ func rewriteValueARM_OpARMBFX_0(v *Value) bool { return false } func rewriteValueARM_OpARMBFXU_0(v *Value) bool { + v_0 := v.Args[0] // match: (BFXU [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3009,12 +2946,12 @@ func 
rewriteValueARM_OpARMBFXU_0(v *Value) bool { return false } func rewriteValueARM_OpARMBIC_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BIC x (MOVWconst [c])) // result: (BICconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -3027,9 +2964,7 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (SLLconst [c] y)) // result: (BICshiftLL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } @@ -3044,9 +2979,7 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (SRLconst [c] y)) // result: (BICshiftRL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -3061,9 +2994,7 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (SRAconst [c] y)) // result: (BICshiftRA x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -3078,9 +3009,7 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (SLL y z)) // result: (BICshiftLLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLL { break } @@ -3095,9 +3024,7 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (SRL y z)) // result: (BICshiftRLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRL { break } @@ -3112,9 +3039,7 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (SRA y z)) // result: (BICshiftRAreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRA { break } @@ -3129,8 +3054,8 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x x) // result: (MOVWconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARMMOVWconst) @@ -3140,13 +3065,14 @@ func rewriteValueARM_OpARMBIC_0(v *Value) bool { return false } func rewriteValueARM_OpARMBICconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (BICconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -3169,7 +3095,7 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { // result: (ANDconst [int64(int32(^uint32(c)))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) { break } @@ -3183,7 +3109,7 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { // result: (ANDconst [int64(int32(^uint32(c)))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } @@ -3196,7 +3122,6 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { // result: (MOVWconst [d&^c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3209,7 +3134,6 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { // result: (BICconst [int64(int32(c|d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMBICconst { break } @@ -3223,13 +3147,13 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMBICshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BICshiftLL x (MOVWconst [c]) [d]) // result: (BICconst x [int64(int32(uint32(c)<>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 
if v_1.Op != OpARMMOVWconst { break } @@ -3302,9 +3225,7 @@ func rewriteValueARM_OpARMBICshiftRA_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -3319,13 +3240,14 @@ func rewriteValueARM_OpARMBICshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMBICshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BICshiftRAreg x y (MOVWconst [c])) // result: (BICshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -3339,13 +3261,13 @@ func rewriteValueARM_OpARMBICshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMBICshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BICshiftRL x (MOVWconst [c]) [d]) // result: (BICconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -3360,9 +3282,7 @@ func rewriteValueARM_OpARMBICshiftRL_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -3377,13 +3297,14 @@ func rewriteValueARM_OpARMBICshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMBICshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BICshiftRLreg x y (MOVWconst [c])) // result: (BICshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -3397,13 +3318,13 @@ func rewriteValueARM_OpARMBICshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMN_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMN x (MOVWconst [c])) // result: (CMNconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -3418,10 +3339,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (SLLconst [c] y)) // result: (CMNshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -3438,10 +3357,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (SRLconst [c] y)) // result: (CMNshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -3458,10 +3375,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (SRAconst [c] y)) // result: (CMNshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -3478,10 +3393,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (SLL y z)) // result: (CMNshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := 
v_0 if v_1.Op != OpARMSLL { continue } @@ -3498,10 +3411,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (SRL y z)) // result: (CMNshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -3518,10 +3429,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (SRA y z)) // result: (CMNshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -3538,10 +3447,8 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { // match: (CMN x (RSBconst [0] y)) // result: (CMP x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { continue } @@ -3556,12 +3463,12 @@ func rewriteValueARM_OpARMCMN_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMNconst (MOVWconst [x]) [y]) // cond: int32(x)==int32(-y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3577,7 +3484,6 @@ func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3593,7 +3499,6 @@ func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3609,7 +3514,6 @@ func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3625,7 +3529,6 @@ func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -3639,17 +3542,18 @@ func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMNshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftLL (MOVWconst [c]) x [d]) // result: (CMNconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -3662,9 +3566,7 @@ func rewriteValueARM_OpARMCMNshiftLL_0(v *Value) bool { // result: (CMNconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -3699,10 +3603,8 @@ func rewriteValueARM_OpARMCMNshiftLLreg_0(v *Value) bool { // match: (CMNshiftLLreg x y (MOVWconst [c])) // result: (CMNshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -3716,17 +3618,18 @@ func rewriteValueARM_OpARMCMNshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMNshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftRA (MOVWconst [c]) x [d]) // result: (CMNconst [c] (SRAconst x [d])) for { d := v.AuxInt - x 
:= v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -3739,9 +3642,7 @@ func rewriteValueARM_OpARMCMNshiftRA_0(v *Value) bool { // result: (CMNconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -3754,17 +3655,19 @@ func rewriteValueARM_OpARMCMNshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMNshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftRAreg (MOVWconst [c]) x y) // result: (CMNconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -3776,10 +3679,8 @@ func rewriteValueARM_OpARMCMNshiftRAreg_0(v *Value) bool { // match: (CMNshiftRAreg x y (MOVWconst [c])) // result: (CMNshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -3793,17 +3694,18 @@ func rewriteValueARM_OpARMCMNshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMNshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftRL (MOVWconst [c]) x [d]) // result: (CMNconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -3816,9 +3718,7 @@ func rewriteValueARM_OpARMCMNshiftRL_0(v *Value) bool { // result: (CMNconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -3831,17 +3731,19 @@ func rewriteValueARM_OpARMCMNshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMNshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftRLreg (MOVWconst [c]) x y) // result: (CMNconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMCMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -3853,10 +3755,8 @@ func rewriteValueARM_OpARMCMNshiftRLreg_0(v *Value) bool { // match: (CMNshiftRLreg x y (MOVWconst [c])) // result: (CMNshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -3870,12 +3770,12 @@ func rewriteValueARM_OpARMCMNshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMOVWHSconst _ (FlagEQ) [c]) // result: (MOVWconst [c]) for { c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARMFlagEQ { break } @@ -3886,9 +3786,7 @@ func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { // match: (CMOVWHSconst x (FlagLT_ULT)) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMFlagLT_ULT { break } @@ -3901,8 +3799,6 @@ func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { // result: (MOVWconst [c]) for { c := v.AuxInt - _ = 
v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARMFlagLT_UGT { break } @@ -3913,9 +3809,7 @@ func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { // match: (CMOVWHSconst x (FlagGT_ULT)) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMFlagGT_ULT { break } @@ -3928,8 +3822,6 @@ func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { // result: (MOVWconst [c]) for { c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARMFlagGT_UGT { break } @@ -3941,9 +3833,7 @@ func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { // result: (CMOVWLSconst x flags [c]) for { c := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMInvertFlags { break } @@ -3957,12 +3847,12 @@ func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMOVWLSconst _ (FlagEQ) [c]) // result: (MOVWconst [c]) for { c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARMFlagEQ { break } @@ -3974,8 +3864,6 @@ func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { // result: (MOVWconst [c]) for { c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARMFlagLT_ULT { break } @@ -3986,9 +3874,7 @@ func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { // match: (CMOVWLSconst x (FlagLT_UGT)) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMFlagLT_UGT { break } @@ -4001,8 +3887,6 @@ func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { // result: (MOVWconst [c]) for { c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARMFlagGT_ULT { break } @@ -4013,9 +3897,7 @@ func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { // match: (CMOVWLSconst x (FlagGT_UGT)) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMFlagGT_UGT { break } @@ -4028,9 +3910,7 @@ func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { // result: (CMOVWHSconst x flags [c]) for { c := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMInvertFlags { break } @@ -4044,13 +3924,13 @@ func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMP_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMP x (MOVWconst [c])) // result: (CMPconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -4063,12 +3943,11 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP (MOVWconst [c]) x) // result: (InvertFlags (CMPconst [c] x)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4079,9 +3958,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP x (SLLconst [c] y)) // result: (CMPshiftLL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } @@ -4096,13 +3973,12 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP (SLLconst [c] y) x) // result: (InvertFlags (CMPshiftLL x y [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) 
v0.AuxInt = c @@ -4114,9 +3990,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP x (SRLconst [c] y)) // result: (CMPshiftRL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -4131,13 +4005,12 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP (SRLconst [c] y) x) // result: (InvertFlags (CMPshiftRL x y [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c @@ -4149,9 +4022,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP x (SRAconst [c] y)) // result: (CMPshiftRA x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -4166,13 +4037,12 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP (SRAconst [c] y) x) // result: (InvertFlags (CMPshiftRA x y [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c @@ -4184,9 +4054,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP x (SLL y z)) // result: (CMPshiftLLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLL { break } @@ -4201,13 +4069,12 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { // match: (CMP (SLL y z) x) // result: (InvertFlags (CMPshiftLLreg x y z)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -4219,13 +4086,13 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMP_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMP x (SRL y z)) // result: (CMPshiftRLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRL { break } @@ -4240,13 +4107,12 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool { // match: (CMP (SRL y z) x) // result: (InvertFlags (CMPshiftRLreg x y z)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -4258,9 +4124,7 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool { // match: (CMP x (SRA y z)) // result: (CMPshiftRAreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRA { break } @@ -4275,13 +4139,12 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool { // match: (CMP (SRA y z) x) // result: (InvertFlags (CMPshiftRAreg x y z)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRA { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -4293,9 +4156,7 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool { // match: (CMP x (RSBconst [0] y)) // result: (CMN x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMRSBconst || v_1.AuxInt != 0 { break } @@ -4308,12 +4169,12 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool { return false } func rewriteValueARM_OpARMCMPD_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMPD x (MOVDconst [0])) // result: (CMPD0 x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVDconst || v_1.AuxInt != 0 { break } @@ -4324,12 +4185,12 @@ func rewriteValueARM_OpARMCMPD_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPF_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMPF x (MOVFconst [0])) // result: (CMPF0 x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVFconst || v_1.AuxInt != 0 { break } @@ -4340,12 +4201,12 @@ func rewriteValueARM_OpARMCMPF_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPconst (MOVWconst [x]) [y]) // cond: int32(x)==int32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -4361,7 +4222,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -4377,7 +4237,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -4393,7 +4252,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -4409,7 +4267,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -4425,7 +4282,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVBUreg || !(0xff < c) { break } @@ -4437,7 +4293,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVHUreg || !(0xffff < c) { break } @@ -4449,7 +4304,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMANDconst { break } @@ -4465,7 +4319,6 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } @@ -4479,17 +4332,18 @@ func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftLL (MOVWconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SLLconst x [d]))) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4504,9 +4358,7 @@ func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool { // result: (CMPconst x [int64(int32(uint32(c)< x y))) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4543,10 +4397,8 @@ func rewriteValueARM_OpARMCMPshiftLLreg_0(v *Value) bool { // match: (CMPshiftLLreg x y (MOVWconst [c])) // result: (CMPshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ 
-4560,17 +4412,18 @@ func rewriteValueARM_OpARMCMPshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftRA (MOVWconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SRAconst x [d]))) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4585,9 +4438,7 @@ func rewriteValueARM_OpARMCMPshiftRA_0(v *Value) bool { // result: (CMPconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -4600,17 +4451,19 @@ func rewriteValueARM_OpARMCMPshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftRAreg (MOVWconst [c]) x y) // result: (InvertFlags (CMPconst [c] (SRA x y))) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4624,10 +4477,8 @@ func rewriteValueARM_OpARMCMPshiftRAreg_0(v *Value) bool { // match: (CMPshiftRAreg x y (MOVWconst [c])) // result: (CMPshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -4641,17 +4492,18 @@ func rewriteValueARM_OpARMCMPshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftRL (MOVWconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SRLconst x [d]))) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4666,9 +4518,7 @@ func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool { // result: (CMPconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -4681,17 +4531,19 @@ func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMCMPshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftRLreg (MOVWconst [c]) x y) // result: (InvertFlags (CMPconst [c] (SRL x y))) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -4705,10 +4557,8 @@ func rewriteValueARM_OpARMCMPshiftRLreg_0(v *Value) bool { // match: (CMPshiftRLreg x y (MOVWconst [c])) // result: (CMPshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -4722,10 +4572,10 @@ func rewriteValueARM_OpARMCMPshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (Equal (FlagEQ)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -4736,7 +4586,6 
@@ func rewriteValueARM_OpARMEqual_0(v *Value) bool { // match: (Equal (FlagLT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -4747,7 +4596,6 @@ func rewriteValueARM_OpARMEqual_0(v *Value) bool { // match: (Equal (FlagLT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -4758,7 +4606,6 @@ func rewriteValueARM_OpARMEqual_0(v *Value) bool { // match: (Equal (FlagGT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -4769,7 +4616,6 @@ func rewriteValueARM_OpARMEqual_0(v *Value) bool { // match: (Equal (FlagGT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -4780,7 +4626,6 @@ func rewriteValueARM_OpARMEqual_0(v *Value) bool { // match: (Equal (InvertFlags x)) // result: (Equal x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -4792,10 +4637,10 @@ func rewriteValueARM_OpARMEqual_0(v *Value) bool { return false } func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterEqual (FlagEQ)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -4806,7 +4651,6 @@ func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagLT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -4817,7 +4661,6 @@ func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagLT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -4828,7 +4671,6 @@ func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagGT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -4839,7 +4681,6 @@ func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagGT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -4850,7 +4691,6 @@ func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { // match: (GreaterEqual (InvertFlags x)) // result: (LessEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -4862,10 +4702,10 @@ func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { return false } func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterEqualU (FlagEQ)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -4876,7 +4716,6 @@ func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagLT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -4887,7 +4726,6 @@ func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagLT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -4898,7 +4736,6 @@ func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagGT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -4909,7 +4746,6 @@ func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagGT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -4920,7 +4756,6 @@ func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (InvertFlags x)) 
// result: (LessEqualU x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -4932,10 +4767,10 @@ func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { return false } func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterThan (FlagEQ)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -4946,7 +4781,6 @@ func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagLT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -4957,7 +4791,6 @@ func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagLT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -4968,7 +4801,6 @@ func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagGT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -4979,7 +4811,6 @@ func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagGT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -4990,7 +4821,6 @@ func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { // match: (GreaterThan (InvertFlags x)) // result: (LessThan x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -5002,10 +4832,10 @@ func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { return false } func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterThanU (FlagEQ)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -5016,7 +4846,6 @@ func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagLT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -5027,7 +4856,6 @@ func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagLT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -5038,7 +4866,6 @@ func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagGT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -5049,7 +4876,6 @@ func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagGT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -5060,7 +4886,6 @@ func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { // match: (GreaterThanU (InvertFlags x)) // result: (LessThanU x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -5072,10 +4897,10 @@ func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { return false } func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessEqual (FlagEQ)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -5086,7 +4911,6 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { // match: (LessEqual (FlagLT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -5097,7 +4921,6 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { // match: (LessEqual (FlagLT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -5108,7 +4931,6 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { // match: (LessEqual 
(FlagGT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -5119,7 +4941,6 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { // match: (LessEqual (FlagGT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -5130,7 +4951,6 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { // match: (LessEqual (InvertFlags x)) // result: (GreaterEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -5142,10 +4962,10 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { return false } func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessEqualU (FlagEQ)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -5156,7 +4976,6 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagLT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -5167,7 +4986,6 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagLT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -5178,7 +4996,6 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagGT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -5189,7 +5006,6 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagGT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -5200,7 +5016,6 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { // match: (LessEqualU (InvertFlags x)) // result: (GreaterEqualU x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -5212,10 +5027,10 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { return false } func rewriteValueARM_OpARMLessThan_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessThan (FlagEQ)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -5226,7 +5041,6 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { // match: (LessThan (FlagLT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -5237,7 +5051,6 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { // match: (LessThan (FlagLT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -5248,7 +5061,6 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { // match: (LessThan (FlagGT_ULT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -5259,7 +5071,6 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { // match: (LessThan (FlagGT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -5270,7 +5081,6 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { // match: (LessThan (InvertFlags x)) // result: (GreaterThan x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -5282,10 +5092,10 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { return false } func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessThanU (FlagEQ)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -5296,7 +5106,6 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { // match: (LessThanU (FlagLT_ULT)) // result: (MOVWconst [1]) 
for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -5307,7 +5116,6 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { // match: (LessThanU (FlagLT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -5318,7 +5126,6 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { // match: (LessThanU (FlagGT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { break } @@ -5329,7 +5136,6 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { // match: (LessThanU (FlagGT_UGT)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -5340,7 +5146,6 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { // match: (LessThanU (InvertFlags x)) // result: (GreaterThanU x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -5352,18 +5157,19 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) // result: (MOVBUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = sym @@ -5376,13 +5182,12 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVBUload) v.AuxInt = off1 - off2 v.Aux = sym @@ -5396,14 +5201,13 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -5420,9 +5224,7 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVBstore { break } @@ -5446,13 +5248,12 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -5468,8 +5269,6 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -5480,14 +5279,15 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) // cond: isSamePtr(ptr, ptr2) // result: (MOVBUreg x) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVBstoreidx { break } @@ -5507,13 +5307,12 @@ func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { // match: (MOVBUloadidx ptr (MOVWconst [c]) mem) // result: (MOVBUload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARMMOVBUload) v.AuxInt = c v.AddArg(ptr) @@ -5523,13 +5322,12 @@ func rewriteValueARM_OpARMMOVBUloadidx_0(v 
*Value) bool { // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) // result: (MOVBUload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVBUload) v.AuxInt = c v.AddArg(ptr) @@ -5539,10 +5337,11 @@ func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBUreg x:(MOVBUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBUload { break } @@ -5554,7 +5353,6 @@ func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { // match: (MOVBUreg (ANDconst [c] x)) // result: (ANDconst [c&0xff] x) for { - v_0 := v.Args[0] if v_0.Op != OpARMANDconst { break } @@ -5568,7 +5366,7 @@ func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { // match: (MOVBUreg x:(MOVBUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBUreg { break } @@ -5579,7 +5377,6 @@ func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { // match: (MOVBUreg (MOVWconst [c])) // result: (MOVWconst [int64(uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -5591,18 +5388,19 @@ func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) // result: (MOVBload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVBload) v.AuxInt = off1 + off2 v.Aux = sym @@ -5615,13 +5413,12 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVBload) v.AuxInt = off1 - off2 v.Aux = sym @@ -5635,14 +5432,13 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -5659,9 +5455,7 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVBstore { break } @@ -5685,13 +5479,12 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -5704,14 +5497,15 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) // cond: isSamePtr(ptr, ptr2) // result: (MOVBreg x) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVBstoreidx { break } @@ -5731,13 +5525,12 @@ func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { // match: (MOVBloadidx ptr (MOVWconst [c]) mem) // result: (MOVBload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt 
+ mem := v_2 v.reset(OpARMMOVBload) v.AuxInt = c v.AddArg(ptr) @@ -5747,13 +5540,12 @@ func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { // match: (MOVBloadidx (MOVWconst [c]) ptr mem) // result: (MOVBload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVBload) v.AuxInt = c v.AddArg(ptr) @@ -5763,10 +5555,11 @@ func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBreg x:(MOVBload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBload { break } @@ -5779,7 +5572,6 @@ func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { // cond: c & 0x80 == 0 // result: (ANDconst [c&0x7f] x) for { - v_0 := v.Args[0] if v_0.Op != OpARMANDconst { break } @@ -5796,7 +5588,7 @@ func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { // match: (MOVBreg x:(MOVBreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBreg { break } @@ -5807,7 +5599,6 @@ func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { // match: (MOVBreg (MOVWconst [c])) // result: (MOVWconst [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -5819,19 +5610,21 @@ func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym @@ -5845,14 +5638,13 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = off1 - off2 v.Aux = sym @@ -5867,15 +5659,14 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -5892,13 +5683,12 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVBreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym @@ -5912,13 +5702,12 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVBUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym @@ -5932,13 +5721,12 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym @@ -5952,13 +5740,12 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { for { off 
:= v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = off v.Aux = sym @@ -5975,14 +5762,13 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(sym == nil) { break } @@ -5996,17 +5782,20 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem) // result: (MOVBstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVBstore) v.AuxInt = c v.AddArg(ptr) @@ -6017,14 +5806,13 @@ func rewriteValueARM_OpARMMOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) // result: (MOVBstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARMMOVBstore) v.AuxInt = c v.AddArg(ptr) @@ -6035,18 +5823,19 @@ func rewriteValueARM_OpARMMOVBstoreidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // result: (MOVDload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVDload) v.AuxInt = off1 + off2 v.Aux = sym @@ -6059,13 +5848,12 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVDload) v.AuxInt = off1 - off2 v.Aux = sym @@ -6079,14 +5867,13 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -6103,9 +5890,7 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVDstore { break } @@ -6125,19 +5910,21 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVDstore) v.AuxInt = off1 + off2 v.Aux = sym @@ -6151,14 +5938,13 @@ func rewriteValueARM_OpARMMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt 
ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVDstore) v.AuxInt = off1 - off2 v.Aux = sym @@ -6173,15 +5959,14 @@ func rewriteValueARM_OpARMMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -6196,18 +5981,19 @@ func rewriteValueARM_OpARMMOVDstore_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) // result: (MOVFload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = sym @@ -6220,13 +6006,12 @@ func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVFload) v.AuxInt = off1 - off2 v.Aux = sym @@ -6240,14 +6025,13 @@ func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -6264,9 +6048,7 @@ func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVFstore { break } @@ -6286,19 +6068,21 @@ func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) // result: (MOVFstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVFstore) v.AuxInt = off1 + off2 v.Aux = sym @@ -6312,14 +6096,13 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVFstore) v.AuxInt = off1 - off2 v.Aux = sym @@ -6334,15 +6117,14 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -6357,6 +6139,8 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -6364,13 +6148,12 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 
v.reset(OpARMMOVHUload) v.AuxInt = off1 + off2 v.Aux = sym @@ -6383,13 +6166,12 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVHUload) v.AuxInt = off1 - off2 v.Aux = sym @@ -6403,14 +6185,13 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -6427,9 +6208,7 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVHstore { break } @@ -6453,13 +6232,12 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -6475,8 +6253,6 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -6487,14 +6263,15 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) // cond: isSamePtr(ptr, ptr2) // result: (MOVHUreg x) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVHstoreidx { break } @@ -6514,13 +6291,12 @@ func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { // match: (MOVHUloadidx ptr (MOVWconst [c]) mem) // result: (MOVHUload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARMMOVHUload) v.AuxInt = c v.AddArg(ptr) @@ -6530,13 +6306,12 @@ func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) // result: (MOVHUload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVHUload) v.AuxInt = c v.AddArg(ptr) @@ -6546,10 +6321,11 @@ func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHUreg x:(MOVBUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBUload { break } @@ -6561,7 +6337,7 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVHUload { break } @@ -6573,7 +6349,6 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { // match: (MOVHUreg (ANDconst [c] x)) // result: (ANDconst [c&0xffff] x) for { - v_0 := v.Args[0] if v_0.Op != OpARMANDconst { break } @@ -6587,7 +6362,7 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVBUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBUreg { break } @@ -6598,7 +6373,7 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUreg _)) // result: 
(MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVHUreg { break } @@ -6609,7 +6384,6 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { // match: (MOVHUreg (MOVWconst [c])) // result: (MOVWconst [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -6621,18 +6395,19 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // result: (MOVHload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = sym @@ -6645,13 +6420,12 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVHload) v.AuxInt = off1 - off2 v.Aux = sym @@ -6665,14 +6439,13 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -6689,9 +6462,7 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVHstore { break } @@ -6715,13 +6486,12 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -6734,14 +6504,15 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) // cond: isSamePtr(ptr, ptr2) // result: (MOVHreg x) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVHstoreidx { break } @@ -6761,13 +6532,12 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { // match: (MOVHloadidx ptr (MOVWconst [c]) mem) // result: (MOVHload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARMMOVHload) v.AuxInt = c v.AddArg(ptr) @@ -6777,13 +6547,12 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { // match: (MOVHloadidx (MOVWconst [c]) ptr mem) // result: (MOVHload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVHload) v.AuxInt = c v.AddArg(ptr) @@ -6793,10 +6562,11 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHreg x:(MOVBload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBload { break } @@ -6808,7 +6578,7 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBUload { break } @@ 
-6820,7 +6590,7 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVHload { break } @@ -6833,7 +6603,6 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // cond: c & 0x8000 == 0 // result: (ANDconst [c&0x7fff] x) for { - v_0 := v.Args[0] if v_0.Op != OpARMANDconst { break } @@ -6850,7 +6619,7 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBreg { break } @@ -6861,7 +6630,7 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVBUreg { break } @@ -6872,7 +6641,7 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARMMOVHreg { break } @@ -6883,7 +6652,6 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { // match: (MOVHreg (MOVWconst [c])) // result: (MOVWconst [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -6895,19 +6663,21 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) // result: (MOVHstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVHstore) v.AuxInt = off1 + off2 v.Aux = sym @@ -6921,14 +6691,13 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVHstore) v.AuxInt = off1 - off2 v.Aux = sym @@ -6943,15 +6712,14 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -6968,13 +6736,12 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVHstore) v.AuxInt = off v.Aux = sym @@ -6988,13 +6755,12 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVHstore) v.AuxInt = off v.Aux = sym @@ -7011,14 +6777,13 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(sym == nil) { break } @@ -7032,17 +6797,20 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem) // result: (MOVHstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVHstore) v.AuxInt = c v.AddArg(ptr) @@ -7053,14 +6821,13 @@ func rewriteValueARM_OpARMMOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) // result: (MOVHstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARMMOVHstore) v.AuxInt = c v.AddArg(ptr) @@ -7071,6 +6838,8 @@ func rewriteValueARM_OpARMMOVHstoreidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -7078,13 +6847,12 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVWload) v.AuxInt = off1 + off2 v.Aux = sym @@ -7097,13 +6865,12 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 v.reset(OpARMMOVWload) v.AuxInt = off1 - off2 v.Aux = sym @@ -7117,14 +6884,13 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -7141,9 +6907,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWstore { break } @@ -7168,13 +6932,12 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -7192,14 +6955,13 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDshiftLL { break } c := v_0.AuxInt idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -7218,14 +6980,13 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDshiftRL { break } c := v_0.AuxInt idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -7244,14 +7005,13 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDshiftRA { break } c := v_0.AuxInt idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(sym == nil) { break } @@ -7268,8 +7028,6 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -7280,14 +7038,15 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) // cond: isSamePtr(ptr, ptr2) // result: x for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVWstoreidx { break } @@ -7308,13 +7067,12 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx ptr (MOVWconst [c]) mem) // result: (MOVWload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = c v.AddArg(ptr) @@ -7324,13 +7082,12 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx (MOVWconst [c]) ptr mem) // result: (MOVWload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = c v.AddArg(ptr) @@ -7340,14 +7097,13 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx ptr (SLLconst idx [c]) mem) // result: (MOVWloadshiftLL ptr idx [c] mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMSLLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c v.AddArg(ptr) @@ -7358,14 +7114,13 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx (SLLconst idx [c]) ptr mem) // result: (MOVWloadshiftLL ptr idx [c] mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c v.AddArg(ptr) @@ -7376,14 +7131,13 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx ptr (SRLconst idx [c]) mem) // result: (MOVWloadshiftRL ptr idx [c] mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMSRLconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c v.AddArg(ptr) @@ -7394,14 +7148,13 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx (SRLconst idx [c]) ptr mem) // result: (MOVWloadshiftRL ptr idx [c] mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVWloadshiftRL) v.AuxInt = c v.AddArg(ptr) @@ -7412,14 +7165,13 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx ptr (SRAconst idx [c]) mem) // result: (MOVWloadshiftRA ptr idx [c] mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMSRAconst { break } c := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c v.AddArg(ptr) @@ -7430,14 +7182,13 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx (SRAconst idx [c]) ptr mem) // result: (MOVWloadshiftRA ptr idx [c] mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARMMOVWloadshiftRA) v.AuxInt = c v.AddArg(ptr) @@ -7448,15 +7199,16 @@ func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWloadshiftLL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] 
+ v_0 := v.Args[0] // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) // cond: c==d && isSamePtr(ptr, ptr2) // result: x for { c := v.AuxInt - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVWstoreshiftLL { break } @@ -7479,13 +7231,12 @@ func rewriteValueARM_OpARMMOVWloadshiftLL_0(v *Value) bool { // result: (MOVWload [int64(uint32(c)<>uint64(d))] ptr mem) for { d := v.AuxInt - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(ptr) @@ -7542,15 +7293,16 @@ func rewriteValueARM_OpARMMOVWloadshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWloadshiftRL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) // cond: c==d && isSamePtr(ptr, ptr2) // result: x for { c := v.AuxInt - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARMMOVWstoreshiftRL { break } @@ -7573,13 +7325,12 @@ func rewriteValueARM_OpARMMOVWloadshiftRL_0(v *Value) bool { // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem) for { d := v.AuxInt - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARMMOVWload) v.AuxInt = int64(uint32(c) >> uint64(d)) v.AddArg(ptr) @@ -7589,11 +7340,12 @@ func rewriteValueARM_OpARMMOVWloadshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg x) // cond: x.Uses == 1 // result: (MOVWnop x) for { - x := v.Args[0] + x := v_0 if !(x.Uses == 1) { break } @@ -7604,7 +7356,6 @@ func rewriteValueARM_OpARMMOVWreg_0(v *Value) bool { // match: (MOVWreg (MOVWconst [c])) // result: (MOVWconst [c]) for { - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -7616,19 +7367,21 @@ func rewriteValueARM_OpARMMOVWreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVWstore) v.AuxInt = off1 + off2 v.Aux = sym @@ -7642,14 +7395,13 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 v.reset(OpARMMOVWstore) v.AuxInt = off1 - off2 v.Aux = sym @@ -7664,15 +7416,14 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -7692,14 +7443,13 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := 
v_1 + mem := v_2 if !(sym == nil) { break } @@ -7718,15 +7468,14 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDshiftLL { break } c := v_0.AuxInt idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(sym == nil) { break } @@ -7746,15 +7495,14 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDshiftRL { break } c := v_0.AuxInt idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(sym == nil) { break } @@ -7774,15 +7522,14 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMADDshiftRA { break } c := v_0.AuxInt idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(sym == nil) { break } @@ -7797,17 +7544,20 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem) // result: (MOVWstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = c v.AddArg(ptr) @@ -7818,14 +7568,13 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem) // result: (MOVWstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = c v.AddArg(ptr) @@ -7836,15 +7585,14 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem) // result: (MOVWstoreshiftLL ptr idx [c] val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMSLLconst { break } c := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c v.AddArg(ptr) @@ -7856,15 +7604,14 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem) // result: (MOVWstoreshiftLL ptr idx [c] val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARMMOVWstoreshiftLL) v.AuxInt = c v.AddArg(ptr) @@ -7876,15 +7623,14 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem) // result: (MOVWstoreshiftRL ptr idx [c] val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMSRLconst { break } c := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c v.AddArg(ptr) @@ -7896,15 +7642,14 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem) // result: (MOVWstoreshiftRL ptr idx [c] val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt idx := v_0.Args[0] - ptr := v.Args[1] - val := 
v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARMMOVWstoreshiftRL) v.AuxInt = c v.AddArg(ptr) @@ -7916,15 +7661,14 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem) // result: (MOVWstoreshiftRA ptr idx [c] val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMSRAconst { break } c := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c v.AddArg(ptr) @@ -7936,15 +7680,14 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem) // result: (MOVWstoreshiftRA ptr idx [c] val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARMMOVWstoreshiftRA) v.AuxInt = c v.AddArg(ptr) @@ -7956,18 +7699,21 @@ func rewriteValueARM_OpARMMOVWstoreidx_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWstoreshiftLL_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) // result: (MOVWstore [int64(uint32(c)<>uint64(d))] ptr val mem) for { d := v.AuxInt - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(ptr) @@ -8000,18 +7749,21 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVWstoreshiftRL_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem) for { d := v.AuxInt - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARMMOVWstore) v.AuxInt = int64(uint32(c) >> uint64(d)) v.AddArg(ptr) @@ -8022,15 +7774,15 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMMUL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MUL x (MOVWconst [c])) // cond: int32(c) == -1 // result: (RSBconst [0] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8048,9 +7800,7 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // match: (MUL _ (MOVWconst [0])) // result: (MOVWconst [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 0 { continue } @@ -8063,10 +7813,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // match: (MUL x (MOVWconst [1])) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { continue } @@ -8081,10 +7829,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) 
for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8103,10 +7849,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (ADDshiftLL x x [log2(c-1)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8126,10 +7870,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (RSBshiftLL x x [log2(c+1)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8149,10 +7891,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8175,10 +7915,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8201,10 +7939,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8227,10 +7963,8 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -8252,17 +7986,16 @@ func rewriteValueARM_OpARMMUL_0(v *Value) bool { return false } func rewriteValueARM_OpARMMUL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MUL (MOVWconst [c]) (MOVWconst [d])) // result: (MOVWconst [int64(int32(c*d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMMOVWconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpARMMOVWconst { continue } @@ -8276,18 +8009,20 @@ func rewriteValueARM_OpARMMUL_10(v *Value) bool { return false } func rewriteValueARM_OpARMMULA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULA x (MOVWconst [c]) a) // cond: int32(c) == -1 // result: (SUB a x) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(int32(c) == -1) { break } @@ -8299,11 +8034,10 @@ func rewriteValueARM_OpARMMULA_0(v *Value) 
bool { // match: (MULA _ (MOVWconst [0]) a) // result: a for { - a := v.Args[2] - v_1 := v.Args[1] if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 0 { break } + a := v_2 v.reset(OpCopy) v.Type = a.Type v.AddArg(a) @@ -8312,12 +8046,11 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // match: (MULA x (MOVWconst [1]) a) // result: (ADD x a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { break } + a := v_2 v.reset(OpARMADD) v.AddArg(x) v.AddArg(a) @@ -8327,13 +8060,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ADD (SLLconst [log2(c)] x) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(isPowerOfTwo(c)) { break } @@ -8349,13 +8081,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (ADD (ADDshiftLL x x [log2(c-1)]) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } @@ -8372,13 +8103,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (ADD (RSBshiftLL x x [log2(c+1)]) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } @@ -8395,13 +8125,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (ADD (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } @@ -8421,13 +8150,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (ADD (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } @@ -8447,13 +8175,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (ADD (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } @@ -8473,13 +8200,12 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (ADD (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } @@ -8498,18 +8224,20 @@ func rewriteValueARM_OpARMMULA_0(v *Value) bool { return false } func rewriteValueARM_OpARMMULA_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULA (MOVWconst [c]) x a) // cond: int32(c) == -1 // result: (SUB a x) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(int32(c) == -1) { break } @@ -8521,11 +8249,10 
@@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // match: (MULA (MOVWconst [0]) _ a) // result: a for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst || v_0.AuxInt != 0 { break } + a := v_2 v.reset(OpCopy) v.Type = a.Type v.AddArg(a) @@ -8534,12 +8261,11 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // match: (MULA (MOVWconst [1]) x a) // result: (ADD x a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst || v_0.AuxInt != 1 { break } - x := v.Args[1] + x := v_1 + a := v_2 v.reset(OpARMADD) v.AddArg(x) v.AddArg(a) @@ -8549,13 +8275,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ADD (SLLconst [log2(c)] x) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(isPowerOfTwo(c)) { break } @@ -8571,13 +8296,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (ADD (ADDshiftLL x x [log2(c-1)]) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } @@ -8594,13 +8318,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (ADD (RSBshiftLL x x [log2(c+1)]) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } @@ -8617,13 +8340,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (ADD (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } @@ -8643,13 +8365,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (ADD (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } @@ -8669,13 +8390,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (ADD (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } @@ -8695,13 +8415,12 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (ADD (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } @@ -8720,20 +8439,21 @@ func rewriteValueARM_OpARMMULA_10(v *Value) bool { return false } func rewriteValueARM_OpARMMULA_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a) // result: (ADDconst [int64(int32(c*d))] a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != 
OpARMMOVWconst { break } d := v_1.AuxInt + a := v_2 v.reset(OpARMADDconst) v.AuxInt = int64(int32(c * d)) v.AddArg(a) @@ -8742,18 +8462,18 @@ func rewriteValueARM_OpARMMULA_20(v *Value) bool { return false } func rewriteValueARM_OpARMMULD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULD (NEGD x) y) // cond: objabi.GOARM >= 6 // result: (NMULD x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMNEGD { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 if !(objabi.GOARM >= 6) { continue } @@ -8767,18 +8487,18 @@ func rewriteValueARM_OpARMMULD_0(v *Value) bool { return false } func rewriteValueARM_OpARMMULF_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULF (NEGF x) y) // cond: objabi.GOARM >= 6 // result: (NMULF x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMNEGF { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 if !(objabi.GOARM >= 6) { continue } @@ -8792,18 +8512,20 @@ func rewriteValueARM_OpARMMULF_0(v *Value) bool { return false } func rewriteValueARM_OpARMMULS_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULS x (MOVWconst [c]) a) // cond: int32(c) == -1 // result: (ADD a x) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(int32(c) == -1) { break } @@ -8815,11 +8537,10 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // match: (MULS _ (MOVWconst [0]) a) // result: a for { - a := v.Args[2] - v_1 := v.Args[1] if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 0 { break } + a := v_2 v.reset(OpCopy) v.Type = a.Type v.AddArg(a) @@ -8828,12 +8549,11 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // match: (MULS x (MOVWconst [1]) a) // result: (RSB x a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst || v_1.AuxInt != 1 { break } + a := v_2 v.reset(OpARMRSB) v.AddArg(x) v.AddArg(a) @@ -8843,13 +8563,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (RSB (SLLconst [log2(c)] x) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(isPowerOfTwo(c)) { break } @@ -8865,13 +8584,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (RSB (ADDshiftLL x x [log2(c-1)]) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } @@ -8888,13 +8606,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (RSB (RSBshiftLL x x [log2(c+1)]) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } @@ -8911,13 +8628,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (RSB (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%3 == 0 && 
isPowerOfTwo(c/3) && is32Bit(c)) { break } @@ -8937,13 +8653,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (RSB (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } @@ -8963,13 +8678,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (RSB (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } @@ -8989,13 +8703,12 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (RSB (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) for { - a := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + a := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } @@ -9014,18 +8727,20 @@ func rewriteValueARM_OpARMMULS_0(v *Value) bool { return false } func rewriteValueARM_OpARMMULS_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULS (MOVWconst [c]) x a) // cond: int32(c) == -1 // result: (ADD a x) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(int32(c) == -1) { break } @@ -9037,11 +8752,10 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // match: (MULS (MOVWconst [0]) _ a) // result: a for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst || v_0.AuxInt != 0 { break } + a := v_2 v.reset(OpCopy) v.Type = a.Type v.AddArg(a) @@ -9050,12 +8764,11 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // match: (MULS (MOVWconst [1]) x a) // result: (RSB x a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst || v_0.AuxInt != 1 { break } - x := v.Args[1] + x := v_1 + a := v_2 v.reset(OpARMRSB) v.AddArg(x) v.AddArg(a) @@ -9065,13 +8778,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: isPowerOfTwo(c) // result: (RSB (SLLconst [log2(c)] x) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(isPowerOfTwo(c)) { break } @@ -9087,13 +8799,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (RSB (ADDshiftLL x x [log2(c-1)]) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } @@ -9110,13 +8821,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (RSB (RSBshiftLL x x [log2(c+1)]) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } @@ -9133,13 +8843,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (RSB (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x 
:= v.Args[1] + x := v_1 + a := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } @@ -9159,13 +8868,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (RSB (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } @@ -9185,13 +8893,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (RSB (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } @@ -9211,13 +8918,12 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (RSB (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + a := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } @@ -9236,20 +8942,21 @@ func rewriteValueARM_OpARMMULS_10(v *Value) bool { return false } func rewriteValueARM_OpARMMULS_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a) // result: (SUBconst [int64(int32(c*d))] a) for { - a := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARMMOVWconst { break } d := v_1.AuxInt + a := v_2 v.reset(OpARMSUBconst) v.AuxInt = int64(int32(c * d)) v.AddArg(a) @@ -9258,10 +8965,10 @@ func rewriteValueARM_OpARMMULS_20(v *Value) bool { return false } func rewriteValueARM_OpARMMVN_0(v *Value) bool { + v_0 := v.Args[0] // match: (MVN (MOVWconst [c])) // result: (MOVWconst [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -9273,7 +8980,6 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (SLLconst [c] x)) // result: (MVNshiftLL x [c]) for { - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } @@ -9287,7 +8993,6 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (SRLconst [c] x)) // result: (MVNshiftRL x [c]) for { - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } @@ -9301,7 +9006,6 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (SRAconst [c] x)) // result: (MVNshiftRA x [c]) for { - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } @@ -9315,7 +9019,6 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (SLL x y)) // result: (MVNshiftLLreg x y) for { - v_0 := v.Args[0] if v_0.Op != OpARMSLL { break } @@ -9329,7 +9032,6 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (SRL x y)) // result: (MVNshiftRLreg x y) for { - v_0 := v.Args[0] if v_0.Op != OpARMSRL { break } @@ -9343,7 +9045,6 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (SRA x y)) // result: (MVNshiftRAreg x y) for { - v_0 := v.Args[0] if v_0.Op != OpARMSRA { break } @@ -9357,11 +9058,11 @@ func rewriteValueARM_OpARMMVN_0(v *Value) bool { return false } func rewriteValueARM_OpARMMVNshiftLL_0(v *Value) bool { + v_0 := v.Args[0] // match: (MVNshiftLL (MOVWconst [c]) [d]) // result: (MOVWconst [^int64(uint32(c)<>uint64(d))]) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } 
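// --- Illustrative sketch (not part of the generated rewriteARM.go) ---
// The commutative matches above (MUL, MULA, MULS, MULD, MULF, ...) now use a
// loop whose post-statement swaps the pre-loaded arguments instead of
// re-indexing v.Args[_i0] / v.Args[1^_i0] on every iteration:
//
//     for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ... }
//
// The toy type and names below are hypothetical stand-ins for *ssa.Value,
// used only to show that both argument orders are still tried.
package main

import "fmt"

type toyValue struct {
	Op   string
	Args []*toyValue
}

// matchMulByConst mirrors the generated shape of (MUL x (MOVWconst [c])):
// the body always reads v_0 and v_1, and the loop's post-statement swaps
// them for the second attempt.
func matchMulByConst(v *toyValue) (*toyValue, bool) {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
		x := v_0
		if v_1.Op != "MOVWconst" {
			continue
		}
		return x, true
	}
	return nil, false
}

func main() {
	x := &toyValue{Op: "Arg"}
	c := &toyValue{Op: "MOVWconst"}
	// The constant can sit in either argument slot; both orders match.
	for _, v := range []*toyValue{
		{Op: "MUL", Args: []*toyValue{x, c}},
		{Op: "MUL", Args: []*toyValue{c, x}},
	} {
		got, ok := matchMulByConst(v)
		fmt.Println(ok, got == x)
	}
}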
@@ -9407,12 +9108,12 @@ func rewriteValueARM_OpARMMVNshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMMVNshiftRAreg_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MVNshiftRAreg x (MOVWconst [c])) // result: (MVNshiftRA x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -9425,11 +9126,11 @@ func rewriteValueARM_OpARMMVNshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMVNshiftRL_0(v *Value) bool { + v_0 := v.Args[0] // match: (MVNshiftRL (MOVWconst [c]) [d]) // result: (MOVWconst [^int64(uint32(c)>>uint64(d))]) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -9441,12 +9142,12 @@ func rewriteValueARM_OpARMMVNshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMMVNshiftRLreg_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MVNshiftRLreg x (MOVWconst [c])) // result: (MVNshiftRL x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -9459,11 +9160,11 @@ func rewriteValueARM_OpARMMVNshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMNEGD_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGD (MULD x y)) // cond: objabi.GOARM >= 6 // result: (NMULD x y) for { - v_0 := v.Args[0] if v_0.Op != OpARMMULD { break } @@ -9480,11 +9181,11 @@ func rewriteValueARM_OpARMNEGD_0(v *Value) bool { return false } func rewriteValueARM_OpARMNEGF_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGF (MULF x y)) // cond: objabi.GOARM >= 6 // result: (NMULF x y) for { - v_0 := v.Args[0] if v_0.Op != OpARMMULF { break } @@ -9501,17 +9202,17 @@ func rewriteValueARM_OpARMNEGF_0(v *Value) bool { return false } func rewriteValueARM_OpARMNMULD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NMULD (NEGD x) y) // result: (MULD x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMNEGD { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARMMULD) v.AddArg(x) v.AddArg(y) @@ -9522,17 +9223,17 @@ func rewriteValueARM_OpARMNMULD_0(v *Value) bool { return false } func rewriteValueARM_OpARMNMULF_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NMULF (NEGF x) y) // result: (MULF x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMNEGF { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARMMULF) v.AddArg(x) v.AddArg(y) @@ -9543,10 +9244,10 @@ func rewriteValueARM_OpARMNMULF_0(v *Value) bool { return false } func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (NotEqual (FlagEQ)) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } @@ -9557,7 +9258,6 @@ func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { // match: (NotEqual (FlagLT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } @@ -9568,7 +9268,6 @@ func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { // match: (NotEqual (FlagLT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } @@ -9579,7 +9278,6 @@ func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { // match: (NotEqual (FlagGT_ULT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_ULT { 
break } @@ -9590,7 +9288,6 @@ func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { // match: (NotEqual (FlagGT_UGT)) // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } @@ -9601,7 +9298,6 @@ func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { // match: (NotEqual (InvertFlags x)) // result: (NotEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } @@ -9613,13 +9309,13 @@ func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { return false } func rewriteValueARM_OpARMOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OR x (MOVWconst [c])) // result: (ORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -9634,10 +9330,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x (SLLconst [c] y)) // result: (ORshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -9654,10 +9348,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x (SRLconst [c] y)) // result: (ORshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -9674,10 +9366,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x (SRAconst [c] y)) // result: (ORshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -9694,10 +9384,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x (SLL y z)) // result: (ORshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -9714,10 +9402,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x (SRL y z)) // result: (ORshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -9734,10 +9420,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x (SRA y z)) // result: (ORshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -9754,8 +9438,8 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { // match: (OR x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -9766,13 +9450,14 @@ func rewriteValueARM_OpARMOR_0(v *Value) bool { return false } func rewriteValueARM_OpARMORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -9794,7 +9479,6 @@ func rewriteValueARM_OpARMORconst_0(v *Value) bool { // result: (MOVWconst [c|d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != 
OpARMMOVWconst { break } @@ -9807,7 +9491,6 @@ func rewriteValueARM_OpARMORconst_0(v *Value) bool { // result: (ORconst [c|d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMORconst { break } @@ -9821,18 +9504,19 @@ func rewriteValueARM_OpARMORconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORshiftLL (MOVWconst [c]) x [d]) // result: (ORconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -9845,9 +9529,7 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { // result: (ORconst x [int64(int32(uint32(c)< [8] (BFXU [armBFAuxInt(8, 8)] x) x) // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { break } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) || x != v_0.Args[0] { + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARMREV16) @@ -9890,16 +9573,15 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { - break - } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 || x != v_0_0.Args[0] || !(objabi.GOARM >= 6) { + if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 { + break + } + x := v_0_0.Args[0] + if x != v_1 || !(objabi.GOARM >= 6) { break } v.reset(OpARMREV16) @@ -9911,9 +9593,8 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARMSLLconst { break } @@ -9929,17 +9610,19 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { return false } func rewriteValueARM_OpARMORshiftLLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftLLreg (MOVWconst [c]) x y) // result: (ORconst [c] (SLL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -9951,10 +9634,8 @@ func rewriteValueARM_OpARMORshiftLLreg_0(v *Value) bool { // match: (ORshiftLLreg x y (MOVWconst [c])) // result: (ORshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -9968,17 +9649,18 @@ func rewriteValueARM_OpARMORshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMORshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftRA (MOVWconst [c]) x [d]) // result: (ORconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -9991,9 +9673,7 @@ func 
rewriteValueARM_OpARMORshiftRA_0(v *Value) bool { // result: (ORconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10008,9 +9688,8 @@ func rewriteValueARM_OpARMORshiftRA_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARMSRAconst { break } @@ -10026,17 +9705,19 @@ func rewriteValueARM_OpARMORshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMORshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftRAreg (MOVWconst [c]) x y) // result: (ORconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -10048,10 +9729,8 @@ func rewriteValueARM_OpARMORshiftRAreg_0(v *Value) bool { // match: (ORshiftRAreg x y (MOVWconst [c])) // result: (ORshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -10065,17 +9744,18 @@ func rewriteValueARM_OpARMORshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftRL (MOVWconst [c]) x [d]) // result: (ORconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -10088,9 +9768,7 @@ func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { // result: (ORconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10104,9 +9782,11 @@ func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { // result: (SRRconst [ c] x) for { c := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst || v_0.AuxInt != 32-c || x != v_0.Args[0] { + if v_0.Op != OpARMSLLconst || v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARMSRRconst) @@ -10119,9 +9799,8 @@ func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARMSRLconst { break } @@ -10137,17 +9816,19 @@ func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMORshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftRLreg (MOVWconst [c]) x y) // result: (ORconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -10159,10 +9840,8 @@ func rewriteValueARM_OpARMORshiftRLreg_0(v *Value) bool { // match: (ORshiftRLreg x y (MOVWconst [c])) // result: (ORshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -10176,15 +9855,16 @@ func rewriteValueARM_OpARMORshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSB_0(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] // match: (RSB (MOVWconst [c]) x) // result: (SUBconst [c] x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBconst) v.AuxInt = c v.AddArg(x) @@ -10193,9 +9873,7 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB x (MOVWconst [c])) // result: (RSBconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10208,9 +9886,7 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB x (SLLconst [c] y)) // result: (RSBshiftLL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } @@ -10225,13 +9901,12 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB (SLLconst [c] y) x) // result: (SUBshiftLL x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMSUBshiftLL) v.AuxInt = c v.AddArg(x) @@ -10241,9 +9916,7 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB x (SRLconst [c] y)) // result: (RSBshiftRL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -10258,13 +9931,12 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB (SRLconst [c] y) x) // result: (SUBshiftRL x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMSUBshiftRL) v.AuxInt = c v.AddArg(x) @@ -10274,9 +9946,7 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB x (SRAconst [c] y)) // result: (RSBshiftRA x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -10291,13 +9961,12 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB (SRAconst [c] y) x) // result: (SUBshiftRA x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMSUBshiftRA) v.AuxInt = c v.AddArg(x) @@ -10307,9 +9976,7 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB x (SLL y z)) // result: (RSBshiftLLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLL { break } @@ -10324,13 +9991,12 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { // match: (RSB (SLL y z) x) // result: (SUBshiftLLreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMSUBshiftLLreg) v.AddArg(x) v.AddArg(y) @@ -10340,12 +10006,12 @@ func rewriteValueARM_OpARMRSB_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSB_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RSB x (SRL y z)) // result: (RSBshiftRLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRL { break } @@ -10360,13 +10026,12 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { // match: (RSB (SRL y z) x) // result: (SUBshiftRLreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMSUBshiftRLreg) v.AddArg(x) v.AddArg(y) @@ -10376,9 +10041,7 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { // match: (RSB x (SRA y z)) // result: (RSBshiftRAreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := 
v_0 if v_1.Op != OpARMSRA { break } @@ -10393,13 +10056,12 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { // match: (RSB (SRA y z) x) // result: (SUBshiftRAreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRA { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMSUBshiftRAreg) v.AddArg(x) v.AddArg(y) @@ -10409,8 +10071,8 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { // match: (RSB x x) // result: (MOVWconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARMMOVWconst) @@ -10421,13 +10083,12 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { // cond: objabi.GOARM == 7 // result: (MULS x y a) for { - a := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMUL { break } y := v_0.Args[1] x := v_0.Args[0] + a := v_1 if !(objabi.GOARM == 7) { break } @@ -10440,17 +10101,18 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { return false } func rewriteValueARM_OpARMRSBSshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBSshiftLL (MOVWconst [c]) x [d]) // result: (SUBSconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -10463,9 +10125,7 @@ func rewriteValueARM_OpARMRSBSshiftLL_0(v *Value) bool { // result: (RSBSconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -10500,10 +10162,8 @@ func rewriteValueARM_OpARMRSBSshiftLLreg_0(v *Value) bool { // match: (RSBSshiftLLreg x y (MOVWconst [c])) // result: (RSBSshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -10517,17 +10177,18 @@ func rewriteValueARM_OpARMRSBSshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBSshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBSshiftRA (MOVWconst [c]) x [d]) // result: (SUBSconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -10540,9 +10201,7 @@ func rewriteValueARM_OpARMRSBSshiftRA_0(v *Value) bool { // result: (RSBSconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10555,17 +10214,19 @@ func rewriteValueARM_OpARMRSBSshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBSshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBSshiftRAreg (MOVWconst [c]) x y) // result: (SUBSconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -10577,10 +10238,8 @@ func rewriteValueARM_OpARMRSBSshiftRAreg_0(v *Value) bool { // match: (RSBSshiftRAreg x y (MOVWconst [c])) // result: (RSBSshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op 
!= OpARMMOVWconst { break } @@ -10594,17 +10253,18 @@ func rewriteValueARM_OpARMRSBSshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBSshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBSshiftRL (MOVWconst [c]) x [d]) // result: (SUBSconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -10617,9 +10277,7 @@ func rewriteValueARM_OpARMRSBSshiftRL_0(v *Value) bool { // result: (RSBSconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10632,17 +10290,19 @@ func rewriteValueARM_OpARMRSBSshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBSshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBSshiftRLreg (MOVWconst [c]) x y) // result: (SUBSconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMSUBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -10654,10 +10314,8 @@ func rewriteValueARM_OpARMRSBSshiftRLreg_0(v *Value) bool { // match: (RSBSshiftRLreg x y (MOVWconst [c])) // result: (RSBSshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -10671,11 +10329,11 @@ func rewriteValueARM_OpARMRSBSshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (RSBconst [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(c-d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -10688,7 +10346,6 @@ func rewriteValueARM_OpARMRSBconst_0(v *Value) bool { // result: (ADDconst [int64(int32(c-d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMRSBconst { break } @@ -10703,7 +10360,6 @@ func rewriteValueARM_OpARMRSBconst_0(v *Value) bool { // result: (RSBconst [int64(int32(c-d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } @@ -10718,7 +10374,6 @@ func rewriteValueARM_OpARMRSBconst_0(v *Value) bool { // result: (RSBconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } @@ -10732,17 +10387,18 @@ func rewriteValueARM_OpARMRSBconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBshiftLL (MOVWconst [c]) x [d]) // result: (SUBconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -10755,9 +10411,7 @@ func rewriteValueARM_OpARMRSBshiftLL_0(v *Value) bool { // result: (RSBconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -10811,10 +10465,8 @@ func rewriteValueARM_OpARMRSBshiftLLreg_0(v *Value) bool { // match: (RSBshiftLLreg x y (MOVWconst [c])) // result: (RSBshiftLL x y 
[c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -10828,17 +10480,18 @@ func rewriteValueARM_OpARMRSBshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBshiftRA (MOVWconst [c]) x [d]) // result: (SUBconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -10851,9 +10504,7 @@ func rewriteValueARM_OpARMRSBshiftRA_0(v *Value) bool { // result: (RSBconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10868,9 +10519,7 @@ func rewriteValueARM_OpARMRSBshiftRA_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -10885,17 +10534,19 @@ func rewriteValueARM_OpARMRSBshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBshiftRAreg (MOVWconst [c]) x y) // result: (SUBconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -10907,10 +10558,8 @@ func rewriteValueARM_OpARMRSBshiftRAreg_0(v *Value) bool { // match: (RSBshiftRAreg x y (MOVWconst [c])) // result: (RSBshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -10924,17 +10573,18 @@ func rewriteValueARM_OpARMRSBshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBshiftRL (MOVWconst [c]) x [d]) // result: (SUBconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -10947,9 +10597,7 @@ func rewriteValueARM_OpARMRSBshiftRL_0(v *Value) bool { // result: (RSBconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -10964,9 +10612,7 @@ func rewriteValueARM_OpARMRSBshiftRL_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -10981,17 +10627,19 @@ func rewriteValueARM_OpARMRSBshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSBshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSBshiftRLreg (MOVWconst [c]) x y) // result: (SUBconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMSUBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -11003,10 +10651,8 @@ func rewriteValueARM_OpARMRSBshiftRLreg_0(v *Value) bool { // match: 
(RSBshiftRLreg x y (MOVWconst [c])) // result: (RSBshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -11020,17 +10666,18 @@ func rewriteValueARM_OpARMRSBshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSCconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RSCconst [c] (ADDconst [d] x) flags) // result: (RSCconst [int64(int32(c-d))] x flags) for { c := v.AuxInt - flags := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } d := v_0.AuxInt x := v_0.Args[0] + flags := v_1 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c - d)) v.AddArg(x) @@ -11041,13 +10688,12 @@ func rewriteValueARM_OpARMRSCconst_0(v *Value) bool { // result: (RSCconst [int64(int32(c+d))] x flags) for { c := v.AuxInt - flags := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } d := v_0.AuxInt x := v_0.Args[0] + flags := v_1 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c + d)) v.AddArg(x) @@ -11057,18 +10703,20 @@ func rewriteValueARM_OpARMRSCconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSCshiftLL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSCshiftLL (MOVWconst [c]) x [d] flags) // result: (SBCconst [c] (SLLconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -11082,13 +10730,12 @@ func rewriteValueARM_OpARMRSCshiftLL_0(v *Value) bool { // result: (RSCconst x [int64(int32(uint32(c)< x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -11122,14 +10772,13 @@ func rewriteValueARM_OpARMRSCshiftLLreg_0(v *Value) bool { // match: (RSCshiftLLreg x y (MOVWconst [c]) flags) // result: (RSCshiftLL x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMRSCshiftLL) v.AuxInt = c v.AddArg(x) @@ -11140,18 +10789,20 @@ func rewriteValueARM_OpARMRSCshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSCshiftRA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSCshiftRA (MOVWconst [c]) x [d] flags) // result: (SBCconst [c] (SRAconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -11165,13 +10816,12 @@ func rewriteValueARM_OpARMRSCshiftRA_0(v *Value) bool { // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags) for { d := v.AuxInt - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) @@ -11181,18 +10831,21 @@ func rewriteValueARM_OpARMRSCshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSCshiftRAreg_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 
:= v.Args[0] b := v.Block // match: (RSCshiftRAreg (MOVWconst [c]) x y flags) // result: (SBCconst [c] (SRA x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -11205,14 +10858,13 @@ func rewriteValueARM_OpARMRSCshiftRAreg_0(v *Value) bool { // match: (RSCshiftRAreg x y (MOVWconst [c]) flags) // result: (RSCshiftRA x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMRSCshiftRA) v.AuxInt = c v.AddArg(x) @@ -11223,18 +10875,20 @@ func rewriteValueARM_OpARMRSCshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSCshiftRL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSCshiftRL (MOVWconst [c]) x [d] flags) // result: (SBCconst [c] (SRLconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -11248,13 +10902,12 @@ func rewriteValueARM_OpARMRSCshiftRL_0(v *Value) bool { // result: (RSCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) for { d := v.AuxInt - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) @@ -11264,18 +10917,21 @@ func rewriteValueARM_OpARMRSCshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMRSCshiftRLreg_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RSCshiftRLreg (MOVWconst [c]) x y flags) // result: (SBCconst [c] (SRL x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMSBCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -11288,14 +10944,13 @@ func rewriteValueARM_OpARMRSCshiftRLreg_0(v *Value) bool { // match: (RSCshiftRLreg x y (MOVWconst [c]) flags) // result: (RSCshiftRL x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMRSCshiftRL) v.AuxInt = c v.AddArg(x) @@ -11306,16 +10961,18 @@ func rewriteValueARM_OpARMRSCshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBC_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SBC (MOVWconst [c]) x flags) // result: (RSCconst [c] x flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = c v.AddArg(x) @@ -11325,13 +10982,12 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC x (MOVWconst [c]) flags) // result: (SBCconst [c] x flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = c v.AddArg(x) @@ -11341,14 +10997,13 @@ func 
rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC x (SLLconst [c] y) flags) // result: (SBCshiftLL x y [c] flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } c := v_1.AuxInt y := v_1.Args[0] + flags := v_2 v.reset(OpARMSBCshiftLL) v.AuxInt = c v.AddArg(x) @@ -11359,14 +11014,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC (SLLconst [c] y) x flags) // result: (RSCshiftLL x y [c] flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt y := v_0.Args[0] - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCshiftLL) v.AuxInt = c v.AddArg(x) @@ -11377,14 +11031,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC x (SRLconst [c] y) flags) // result: (SBCshiftRL x y [c] flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } c := v_1.AuxInt y := v_1.Args[0] + flags := v_2 v.reset(OpARMSBCshiftRL) v.AuxInt = c v.AddArg(x) @@ -11395,14 +11048,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC (SRLconst [c] y) x flags) // result: (RSCshiftRL x y [c] flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt y := v_0.Args[0] - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCshiftRL) v.AuxInt = c v.AddArg(x) @@ -11413,14 +11065,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC x (SRAconst [c] y) flags) // result: (SBCshiftRA x y [c] flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } c := v_1.AuxInt y := v_1.Args[0] + flags := v_2 v.reset(OpARMSBCshiftRA) v.AuxInt = c v.AddArg(x) @@ -11431,14 +11082,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC (SRAconst [c] y) x flags) // result: (RSCshiftRA x y [c] flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt y := v_0.Args[0] - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCshiftRA) v.AuxInt = c v.AddArg(x) @@ -11449,14 +11099,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC x (SLL y z) flags) // result: (SBCshiftLLreg x y z flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLL { break } z := v_1.Args[1] y := v_1.Args[0] + flags := v_2 v.reset(OpARMSBCshiftLLreg) v.AddArg(x) v.AddArg(y) @@ -11467,14 +11116,13 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { // match: (SBC (SLL y z) x flags) // result: (RSCshiftLLreg x y z flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSLL { break } z := v_0.Args[1] y := v_0.Args[0] - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCshiftLLreg) v.AddArg(x) v.AddArg(y) @@ -11485,17 +11133,19 @@ func rewriteValueARM_OpARMSBC_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBC_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SBC x (SRL y z) flags) // result: (SBCshiftRLreg x y z flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRL { break } z := v_1.Args[1] y := v_1.Args[0] + flags := v_2 v.reset(OpARMSBCshiftRLreg) v.AddArg(x) v.AddArg(y) @@ -11506,14 +11156,13 @@ func rewriteValueARM_OpARMSBC_10(v *Value) bool { // match: (SBC (SRL y z) x flags) // result: (RSCshiftRLreg x y z flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSRL { 
break } z := v_0.Args[1] y := v_0.Args[0] - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCshiftRLreg) v.AddArg(x) v.AddArg(y) @@ -11524,14 +11173,13 @@ func rewriteValueARM_OpARMSBC_10(v *Value) bool { // match: (SBC x (SRA y z) flags) // result: (SBCshiftRAreg x y z flags) for { - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRA { break } z := v_1.Args[1] y := v_1.Args[0] + flags := v_2 v.reset(OpARMSBCshiftRAreg) v.AddArg(x) v.AddArg(y) @@ -11542,14 +11190,13 @@ func rewriteValueARM_OpARMSBC_10(v *Value) bool { // match: (SBC (SRA y z) x flags) // result: (RSCshiftRAreg x y z flags) for { - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMSRA { break } z := v_0.Args[1] y := v_0.Args[0] - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCshiftRAreg) v.AddArg(x) v.AddArg(y) @@ -11560,17 +11207,18 @@ func rewriteValueARM_OpARMSBC_10(v *Value) bool { return false } func rewriteValueARM_OpARMSBCconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SBCconst [c] (ADDconst [d] x) flags) // result: (SBCconst [int64(int32(c-d))] x flags) for { c := v.AuxInt - flags := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } d := v_0.AuxInt x := v_0.Args[0] + flags := v_1 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c - d)) v.AddArg(x) @@ -11581,13 +11229,12 @@ func rewriteValueARM_OpARMSBCconst_0(v *Value) bool { // result: (SBCconst [int64(int32(c+d))] x flags) for { c := v.AuxInt - flags := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } d := v_0.AuxInt x := v_0.Args[0] + flags := v_1 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c + d)) v.AddArg(x) @@ -11597,18 +11244,20 @@ func rewriteValueARM_OpARMSBCconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBCshiftLL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SBCshiftLL (MOVWconst [c]) x [d] flags) // result: (RSCconst [c] (SLLconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -11622,13 +11271,12 @@ func rewriteValueARM_OpARMSBCshiftLL_0(v *Value) bool { // result: (SBCconst x [int64(int32(uint32(c)< x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -11662,14 +11313,13 @@ func rewriteValueARM_OpARMSBCshiftLLreg_0(v *Value) bool { // match: (SBCshiftLLreg x y (MOVWconst [c]) flags) // result: (SBCshiftLL x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMSBCshiftLL) v.AuxInt = c v.AddArg(x) @@ -11680,18 +11330,20 @@ func rewriteValueARM_OpARMSBCshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBCshiftRA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SBCshiftRA (MOVWconst [c]) x [d] flags) // result: (RSCconst [c] (SRAconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = c v0 
:= b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -11705,13 +11357,12 @@ func rewriteValueARM_OpARMSBCshiftRA_0(v *Value) bool { // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags) for { d := v.AuxInt - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) @@ -11721,18 +11372,21 @@ func rewriteValueARM_OpARMSBCshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBCshiftRAreg_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SBCshiftRAreg (MOVWconst [c]) x y flags) // result: (RSCconst [c] (SRA x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -11745,14 +11399,13 @@ func rewriteValueARM_OpARMSBCshiftRAreg_0(v *Value) bool { // match: (SBCshiftRAreg x y (MOVWconst [c]) flags) // result: (SBCshiftRA x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMSBCshiftRA) v.AuxInt = c v.AddArg(x) @@ -11763,18 +11416,20 @@ func rewriteValueARM_OpARMSBCshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBCshiftRL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SBCshiftRL (MOVWconst [c]) x [d] flags) // result: (RSCconst [c] (SRLconst x [d]) flags) for { d := v.AuxInt - flags := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + flags := v_2 v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -11788,13 +11443,12 @@ func rewriteValueARM_OpARMSBCshiftRL_0(v *Value) bool { // result: (SBCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) for { d := v.AuxInt - flags := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt + flags := v_2 v.reset(OpARMSBCconst) v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) @@ -11804,18 +11458,21 @@ func rewriteValueARM_OpARMSBCshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMSBCshiftRLreg_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SBCshiftRLreg (MOVWconst [c]) x y flags) // result: (RSCconst [c] (SRL x y) flags) for { - flags := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] + x := v_1 + y := v_2 + flags := v_3 v.reset(OpARMRSCconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -11828,14 +11485,13 @@ func rewriteValueARM_OpARMSBCshiftRLreg_0(v *Value) bool { // match: (SBCshiftRLreg x y (MOVWconst [c]) flags) // result: (SBCshiftRL x y [c] flags) for { - flags := v.Args[3] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } c := v_2.AuxInt + flags := v_3 v.reset(OpARMSBCshiftRL) v.AuxInt = c v.AddArg(x) @@ -11846,12 +11502,12 @@ func rewriteValueARM_OpARMSBCshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SLL x 
(MOVWconst [c])) // result: (SLLconst x [c&31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -11864,11 +11520,11 @@ func rewriteValueARM_OpARMSLL_0(v *Value) bool { return false } func rewriteValueARM_OpARMSLLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SLLconst [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(uint32(d)<>uint64(c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -11991,7 +11640,6 @@ func rewriteValueARM_OpARMSRAconst_0(v *Value) bool { // result: (BFX [(d-c)|(32-d)<<8] x) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } @@ -12008,12 +11656,12 @@ func rewriteValueARM_OpARMSRAconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMSRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SRL x (MOVWconst [c])) // result: (SRLconst x [c&31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -12026,11 +11674,11 @@ func rewriteValueARM_OpARMSRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMSRLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRLconst [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(uint32(d)>>uint64(c)))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -12044,7 +11692,6 @@ func rewriteValueARM_OpARMSRLconst_0(v *Value) bool { // result: (BFXU [(d-c)|(32-d)<<8] x) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } @@ -12061,15 +11708,16 @@ func rewriteValueARM_OpARMSRLconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUB (MOVWconst [c]) x) // result: (RSBconst [c] x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBconst) v.AuxInt = c v.AddArg(x) @@ -12078,9 +11726,7 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB x (MOVWconst [c])) // result: (SUBconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -12093,9 +11739,7 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB x (SLLconst [c] y)) // result: (SUBshiftLL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } @@ -12110,13 +11754,12 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB (SLLconst [c] y) x) // result: (RSBshiftLL x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBshiftLL) v.AuxInt = c v.AddArg(x) @@ -12126,9 +11769,7 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB x (SRLconst [c] y)) // result: (SUBshiftRL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -12143,13 +11784,12 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB (SRLconst [c] y) x) // result: (RSBshiftRL x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBshiftRL) v.AuxInt = c v.AddArg(x) @@ -12159,9 +11799,7 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB x (SRAconst [c] y)) // result: (SUBshiftRA x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op 
!= OpARMSRAconst { break } @@ -12176,13 +11814,12 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB (SRAconst [c] y) x) // result: (RSBshiftRA x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBshiftRA) v.AuxInt = c v.AddArg(x) @@ -12192,9 +11829,7 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB x (SLL y z)) // result: (SUBshiftLLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLL { break } @@ -12209,13 +11844,12 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { // match: (SUB (SLL y z) x) // result: (RSBshiftLLreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBshiftLLreg) v.AddArg(x) v.AddArg(y) @@ -12225,12 +11859,12 @@ func rewriteValueARM_OpARMSUB_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUB_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUB x (SRL y z)) // result: (SUBshiftRLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRL { break } @@ -12245,13 +11879,12 @@ func rewriteValueARM_OpARMSUB_10(v *Value) bool { // match: (SUB (SRL y z) x) // result: (RSBshiftRLreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBshiftRLreg) v.AddArg(x) v.AddArg(y) @@ -12261,9 +11894,7 @@ func rewriteValueARM_OpARMSUB_10(v *Value) bool { // match: (SUB x (SRA y z)) // result: (SUBshiftRAreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRA { break } @@ -12278,13 +11909,12 @@ func rewriteValueARM_OpARMSUB_10(v *Value) bool { // match: (SUB (SRA y z) x) // result: (RSBshiftRAreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRA { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBshiftRAreg) v.AddArg(x) v.AddArg(y) @@ -12294,8 +11924,8 @@ func rewriteValueARM_OpARMSUB_10(v *Value) bool { // match: (SUB x x) // result: (MOVWconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARMMOVWconst) @@ -12306,9 +11936,7 @@ func rewriteValueARM_OpARMSUB_10(v *Value) bool { // cond: objabi.GOARM == 7 // result: (MULS x y a) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARMMUL { break } @@ -12326,13 +11954,13 @@ func rewriteValueARM_OpARMSUB_10(v *Value) bool { return false } func rewriteValueARM_OpARMSUBD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBD a (MULD x y)) // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULSD a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARMMULD { break } @@ -12351,9 +11979,7 @@ func rewriteValueARM_OpARMSUBD_0(v *Value) bool { // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULAD a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARMNMULD { break } @@ -12371,13 +11997,13 @@ func rewriteValueARM_OpARMSUBD_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBF_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBF a (MULF x y)) // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULSF a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARMMULF { break } @@ -12396,9 +12022,7 @@ 
func rewriteValueARM_OpARMSUBF_0(v *Value) bool { // cond: a.Uses == 1 && objabi.GOARM >= 6 // result: (MULAF a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARMNMULF { break } @@ -12416,12 +12040,12 @@ func rewriteValueARM_OpARMSUBF_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBS x (MOVWconst [c])) // result: (SUBSconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -12434,9 +12058,7 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS x (SLLconst [c] y)) // result: (SUBSshiftLL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } @@ -12451,13 +12073,12 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS (SLLconst [c] y) x) // result: (RSBSshiftLL x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBSshiftLL) v.AuxInt = c v.AddArg(x) @@ -12467,9 +12088,7 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS x (SRLconst [c] y)) // result: (SUBSshiftRL x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -12484,13 +12103,12 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS (SRLconst [c] y) x) // result: (RSBSshiftRL x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRLconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBSshiftRL) v.AuxInt = c v.AddArg(x) @@ -12500,9 +12118,7 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS x (SRAconst [c] y)) // result: (SUBSshiftRA x y [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -12517,13 +12133,12 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS (SRAconst [c] y) x) // result: (RSBSshiftRA x y [c]) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBSshiftRA) v.AuxInt = c v.AddArg(x) @@ -12533,9 +12148,7 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS x (SLL y z)) // result: (SUBSshiftLLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLL { break } @@ -12550,13 +12163,12 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS (SLL y z) x) // result: (RSBSshiftLLreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSLL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBSshiftLLreg) v.AddArg(x) v.AddArg(y) @@ -12566,9 +12178,7 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { // match: (SUBS x (SRL y z)) // result: (SUBSshiftRLreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRL { break } @@ -12583,16 +12193,17 @@ func rewriteValueARM_OpARMSUBS_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBS_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBS (SRL y z) x) // result: (RSBSshiftRLreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBSshiftRLreg) v.AddArg(x) v.AddArg(y) @@ -12602,9 +12213,7 @@ func rewriteValueARM_OpARMSUBS_10(v *Value) bool { // 
match: (SUBS x (SRA y z)) // result: (SUBSshiftRAreg x y z) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRA { break } @@ -12619,13 +12228,12 @@ func rewriteValueARM_OpARMSUBS_10(v *Value) bool { // match: (SUBS (SRA y z) x) // result: (RSBSshiftRAreg x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMSRA { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpARMRSBSshiftRAreg) v.AddArg(x) v.AddArg(y) @@ -12635,17 +12243,18 @@ func rewriteValueARM_OpARMSUBS_10(v *Value) bool { return false } func rewriteValueARM_OpARMSUBSshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBSshiftLL (MOVWconst [c]) x [d]) // result: (RSBSconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -12658,9 +12267,7 @@ func rewriteValueARM_OpARMSUBSshiftLL_0(v *Value) bool { // result: (SUBSconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -12695,10 +12304,8 @@ func rewriteValueARM_OpARMSUBSshiftLLreg_0(v *Value) bool { // match: (SUBSshiftLLreg x y (MOVWconst [c])) // result: (SUBSshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -12712,17 +12319,18 @@ func rewriteValueARM_OpARMSUBSshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBSshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBSshiftRA (MOVWconst [c]) x [d]) // result: (RSBSconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -12735,9 +12343,7 @@ func rewriteValueARM_OpARMSUBSshiftRA_0(v *Value) bool { // result: (SUBSconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -12750,17 +12356,19 @@ func rewriteValueARM_OpARMSUBSshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBSshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBSshiftRAreg (MOVWconst [c]) x y) // result: (RSBSconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -12772,10 +12380,8 @@ func rewriteValueARM_OpARMSUBSshiftRAreg_0(v *Value) bool { // match: (SUBSshiftRAreg x y (MOVWconst [c])) // result: (SUBSshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -12789,17 +12395,18 @@ func rewriteValueARM_OpARMSUBSshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBSshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBSshiftRL (MOVWconst [c]) x [d]) // result: (RSBSconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := 
v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -12812,9 +12419,7 @@ func rewriteValueARM_OpARMSUBSshiftRL_0(v *Value) bool { // result: (SUBSconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -12827,17 +12432,19 @@ func rewriteValueARM_OpARMSUBSshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBSshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBSshiftRLreg (MOVWconst [c]) x y) // result: (RSBSconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMRSBSconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -12849,10 +12456,8 @@ func rewriteValueARM_OpARMSUBSshiftRLreg_0(v *Value) bool { // match: (SUBSshiftRLreg x y (MOVWconst [c])) // result: (SUBSshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -12866,11 +12471,11 @@ func rewriteValueARM_OpARMSUBSshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBconst [off1] (MOVWaddr [off2] {sym} ptr)) // result: (MOVWaddr [off2-off1] {sym} ptr) for { off1 := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break } @@ -12889,7 +12494,7 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -12900,7 +12505,7 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { // result: (ADDconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) { break } @@ -12914,7 +12519,7 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { // result: (ANDconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } @@ -12927,7 +12532,6 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { // result: (MOVWconst [int64(int32(d-c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -12940,7 +12544,6 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { // result: (ADDconst [int64(int32(-c-d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } @@ -12955,7 +12558,6 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { // result: (ADDconst [int64(int32(-c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } @@ -12970,7 +12572,6 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { // result: (RSBconst [int64(int32(-c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMRSBconst { break } @@ -12984,17 +12585,18 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBshiftLL (MOVWconst [c]) x [d]) // result: (RSBconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBconst) v.AuxInt = c v0 := 
b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -13007,9 +12609,7 @@ func rewriteValueARM_OpARMSUBshiftLL_0(v *Value) bool { // result: (SUBconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -13063,10 +12663,8 @@ func rewriteValueARM_OpARMSUBshiftLLreg_0(v *Value) bool { // match: (SUBshiftLLreg x y (MOVWconst [c])) // result: (SUBshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13080,17 +12678,18 @@ func rewriteValueARM_OpARMSUBshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBshiftRA (MOVWconst [c]) x [d]) // result: (RSBconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -13103,9 +12702,7 @@ func rewriteValueARM_OpARMSUBshiftRA_0(v *Value) bool { // result: (SUBconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -13120,9 +12717,7 @@ func rewriteValueARM_OpARMSUBshiftRA_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -13137,17 +12732,19 @@ func rewriteValueARM_OpARMSUBshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBshiftRAreg (MOVWconst [c]) x y) // result: (RSBconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -13159,10 +12756,8 @@ func rewriteValueARM_OpARMSUBshiftRAreg_0(v *Value) bool { // match: (SUBshiftRAreg x y (MOVWconst [c])) // result: (SUBshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13176,17 +12771,18 @@ func rewriteValueARM_OpARMSUBshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMSUBshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBshiftRL (MOVWconst [c]) x [d]) // result: (RSBconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -13199,9 +12795,7 @@ func rewriteValueARM_OpARMSUBshiftRL_0(v *Value) bool { // result: (SUBconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -13216,9 +12810,7 @@ func rewriteValueARM_OpARMSUBshiftRL_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -13233,17 +12825,19 @@ func rewriteValueARM_OpARMSUBshiftRL_0(v *Value) bool 
{ return false } func rewriteValueARM_OpARMSUBshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBshiftRLreg (MOVWconst [c]) x y) // result: (RSBconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMRSBconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -13255,10 +12849,8 @@ func rewriteValueARM_OpARMSUBshiftRLreg_0(v *Value) bool { // match: (SUBshiftRLreg x y (MOVWconst [c])) // result: (SUBshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13272,13 +12864,13 @@ func rewriteValueARM_OpARMSUBshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQ_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (TEQ x (MOVWconst [c])) // result: (TEQconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -13293,10 +12885,8 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // match: (TEQ x (SLLconst [c] y)) // result: (TEQshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -13313,10 +12903,8 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // match: (TEQ x (SRLconst [c] y)) // result: (TEQshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -13333,10 +12921,8 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // match: (TEQ x (SRAconst [c] y)) // result: (TEQshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -13353,10 +12939,8 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // match: (TEQ x (SLL y z)) // result: (TEQshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -13373,10 +12957,8 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // match: (TEQ x (SRL y z)) // result: (TEQshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -13393,10 +12975,8 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { // match: (TEQ x (SRA y z)) // result: (TEQshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -13413,12 +12993,12 @@ func rewriteValueARM_OpARMTEQ_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TEQconst (MOVWconst [x]) [y]) // cond: int32(x^y)==0 // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if 
v_0.Op != OpARMMOVWconst { break } @@ -13434,7 +13014,6 @@ func rewriteValueARM_OpARMTEQconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -13450,7 +13029,6 @@ func rewriteValueARM_OpARMTEQconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -13464,17 +13042,18 @@ func rewriteValueARM_OpARMTEQconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TEQshiftLL (MOVWconst [c]) x [d]) // result: (TEQconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -13487,9 +13066,7 @@ func rewriteValueARM_OpARMTEQshiftLL_0(v *Value) bool { // result: (TEQconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -13524,10 +13103,8 @@ func rewriteValueARM_OpARMTEQshiftLLreg_0(v *Value) bool { // match: (TEQshiftLLreg x y (MOVWconst [c])) // result: (TEQshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13541,17 +13118,18 @@ func rewriteValueARM_OpARMTEQshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TEQshiftRA (MOVWconst [c]) x [d]) // result: (TEQconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -13564,9 +13142,7 @@ func rewriteValueARM_OpARMTEQshiftRA_0(v *Value) bool { // result: (TEQconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -13579,17 +13155,19 @@ func rewriteValueARM_OpARMTEQshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TEQshiftRAreg (MOVWconst [c]) x y) // result: (TEQconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -13601,10 +13179,8 @@ func rewriteValueARM_OpARMTEQshiftRAreg_0(v *Value) bool { // match: (TEQshiftRAreg x y (MOVWconst [c])) // result: (TEQshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13618,17 +13194,18 @@ func rewriteValueARM_OpARMTEQshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TEQshiftRL (MOVWconst [c]) x [d]) // result: (TEQconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMTEQconst) v.AuxInt = 
c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -13641,9 +13218,7 @@ func rewriteValueARM_OpARMTEQshiftRL_0(v *Value) bool { // result: (TEQconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -13656,17 +13231,19 @@ func rewriteValueARM_OpARMTEQshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMTEQshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TEQshiftRLreg (MOVWconst [c]) x y) // result: (TEQconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -13678,10 +13255,8 @@ func rewriteValueARM_OpARMTEQshiftRLreg_0(v *Value) bool { // match: (TEQshiftRLreg x y (MOVWconst [c])) // result: (TEQshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13695,13 +13270,13 @@ func rewriteValueARM_OpARMTEQshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMTST_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (TST x (MOVWconst [c])) // result: (TSTconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -13716,10 +13291,8 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // match: (TST x (SLLconst [c] y)) // result: (TSTshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -13736,10 +13309,8 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // match: (TST x (SRLconst [c] y)) // result: (TSTshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -13756,10 +13327,8 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // match: (TST x (SRAconst [c] y)) // result: (TSTshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -13776,10 +13345,8 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // match: (TST x (SLL y z)) // result: (TSTshiftLLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -13796,10 +13363,8 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // match: (TST x (SRL y z)) // result: (TSTshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -13816,10 +13381,8 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { // match: (TST x (SRA y z)) // result: (TSTshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + 
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -13836,12 +13399,12 @@ func rewriteValueARM_OpARMTST_0(v *Value) bool { return false } func rewriteValueARM_OpARMTSTconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TSTconst (MOVWconst [x]) [y]) // cond: int32(x&y)==0 // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -13857,7 +13420,6 @@ func rewriteValueARM_OpARMTSTconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -13873,7 +13435,6 @@ func rewriteValueARM_OpARMTSTconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -13887,17 +13448,18 @@ func rewriteValueARM_OpARMTSTconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMTSTshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftLL (MOVWconst [c]) x [d]) // result: (TSTconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -13910,9 +13472,7 @@ func rewriteValueARM_OpARMTSTshiftLL_0(v *Value) bool { // result: (TSTconst x [int64(int32(uint32(c)< x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -13947,10 +13509,8 @@ func rewriteValueARM_OpARMTSTshiftLLreg_0(v *Value) bool { // match: (TSTshiftLLreg x y (MOVWconst [c])) // result: (TSTshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -13964,17 +13524,18 @@ func rewriteValueARM_OpARMTSTshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMTSTshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftRA (MOVWconst [c]) x [d]) // result: (TSTconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -13987,9 +13548,7 @@ func rewriteValueARM_OpARMTSTshiftRA_0(v *Value) bool { // result: (TSTconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -14002,17 +13561,19 @@ func rewriteValueARM_OpARMTSTshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMTSTshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftRAreg (MOVWconst [c]) x y) // result: (TSTconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -14024,10 +13585,8 @@ func rewriteValueARM_OpARMTSTshiftRAreg_0(v *Value) bool { // match: (TSTshiftRAreg x y (MOVWconst [c])) // result: (TSTshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -14041,17 +13600,18 @@ func 
rewriteValueARM_OpARMTSTshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMTSTshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftRL (MOVWconst [c]) x [d]) // result: (TSTconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -14064,9 +13624,7 @@ func rewriteValueARM_OpARMTSTshiftRL_0(v *Value) bool { // result: (TSTconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -14079,17 +13637,19 @@ func rewriteValueARM_OpARMTSTshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMTSTshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftRLreg (MOVWconst [c]) x y) // result: (TSTconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMTSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -14101,10 +13661,8 @@ func rewriteValueARM_OpARMTSTshiftRLreg_0(v *Value) bool { // match: (TSTshiftRLreg x y (MOVWconst [c])) // result: (TSTshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -14118,13 +13676,13 @@ func rewriteValueARM_OpARMTSTshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMXOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XOR x (MOVWconst [c])) // result: (XORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMMOVWconst { continue } @@ -14139,10 +13697,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SLLconst [c] y)) // result: (XORshiftLL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLLconst { continue } @@ -14159,10 +13715,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SRLconst [c] y)) // result: (XORshiftRL x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRLconst { continue } @@ -14179,10 +13733,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SRAconst [c] y)) // result: (XORshiftRA x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRAconst { continue } @@ -14199,10 +13751,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SRRconst [c] y)) // result: (XORshiftRR x y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRRconst { continue } @@ -14219,10 +13769,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SLL y z)) // result: (XORshiftLLreg x y z) for { - _ 
= v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSLL { continue } @@ -14239,10 +13787,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SRL y z)) // result: (XORshiftRLreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRL { continue } @@ -14259,10 +13805,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (SRA y z)) // result: (XORshiftRAreg x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARMSRA { continue } @@ -14279,8 +13823,8 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x x) // result: (MOVWconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARMMOVWconst) @@ -14290,13 +13834,14 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -14306,7 +13851,6 @@ func rewriteValueARM_OpARMXORconst_0(v *Value) bool { // result: (MOVWconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } @@ -14319,7 +13863,6 @@ func rewriteValueARM_OpARMXORconst_0(v *Value) bool { // result: (XORconst [c^d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARMXORconst { break } @@ -14333,18 +13876,19 @@ func rewriteValueARM_OpARMXORconst_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (XORshiftLL (MOVWconst [c]) x [d]) // result: (XORconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) @@ -14357,9 +13901,7 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { // result: (XORconst x [int64(int32(uint32(c)< [8] (BFXU [armBFAuxInt(8, 8)] x) x) // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { break } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) || x != v_0.Args[0] { + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARMREV16) @@ -14402,16 +13945,15 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { - break - } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 || x != v_0_0.Args[0] || !(objabi.GOARM >= 6) { + if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 { + break + } + x := v_0_0.Args[0] + if x != v_1 || !(objabi.GOARM >= 6) { 
break } v.reset(OpARMREV16) @@ -14423,9 +13965,7 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSLLconst { break } @@ -14440,17 +13980,19 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftLLreg (MOVWconst [c]) x y) // result: (XORconst [c] (SLL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -14462,10 +14004,8 @@ func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { // match: (XORshiftLLreg x y (MOVWconst [c])) // result: (XORshiftLL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -14479,17 +14019,18 @@ func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRA (MOVWconst [c]) x [d]) // result: (XORconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) @@ -14502,9 +14043,7 @@ func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { // result: (XORconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -14519,9 +14058,7 @@ func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRAconst { break } @@ -14536,17 +14073,19 @@ func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRAreg (MOVWconst [c]) x y) // result: (XORconst [c] (SRA x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) @@ -14558,10 +14097,8 @@ func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { // match: (XORshiftRAreg x y (MOVWconst [c])) // result: (XORshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -14575,17 +14112,18 @@ func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRL (MOVWconst [c]) x [d]) // result: (XORconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) @@ -14598,9 +14136,7 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { // result: (XORconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt - _ = v.Args[1] - x 
:= v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -14614,9 +14150,11 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { // result: (SRRconst [ c] x) for { c := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst || v_0.AuxInt != 32-c || x != v_0.Args[0] { + if v_0.Op != OpARMSLLconst || v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARMSRRconst) @@ -14629,9 +14167,7 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { // result: (MOVWconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMSRLconst { break } @@ -14646,17 +14182,19 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRLreg (MOVWconst [c]) x y) // result: (XORconst [c] (SRL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -14668,10 +14206,8 @@ func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { // match: (XORshiftRLreg x y (MOVWconst [c])) // result: (XORshiftRL x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARMMOVWconst { break } @@ -14685,17 +14221,18 @@ func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMXORshiftRR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRR (MOVWconst [c]) x [d]) // result: (XORconst [c] (SRRconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARMXORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type) @@ -14708,9 +14245,7 @@ func rewriteValueARM_OpARMXORshiftRR_0(v *Value) bool { // result: (XORconst x [int64(int32(uint32(c)>>uint64(d)|uint32(c)< x y) // result: (ADD (SRLconst (SUB x y) [1]) y) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) v0.AuxInt = 1 @@ -14911,12 +14475,13 @@ func rewriteValueARM_OpAvg32u_0(v *Value) bool { } } func rewriteValueARM_OpBitLen32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (BitLen32 x) // result: (RSBconst [32] (CLZ x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARMRSBconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARMCLZ, t) @@ -14926,13 +14491,14 @@ func rewriteValueARM_OpBitLen32_0(v *Value) bool { } } func rewriteValueARM_OpBswap32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Bswap32 x) // cond: objabi.GOARM==5 // result: (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 5) { break } @@ -14961,7 +14527,7 @@ func rewriteValueARM_OpBswap32_0(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOARM >= 6) { break } @@ -14972,13 +14538,16 @@ func rewriteValueARM_OpBswap32_0(v *Value) bool { return false } func rewriteValueARM_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry 
closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpARMCALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -14988,30 +14557,33 @@ func rewriteValueARM_OpClosureCall_0(v *Value) bool { } } func rewriteValueARM_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMVN) v.AddArg(x) return true } } func rewriteValueARM_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMVN) v.AddArg(x) return true } } func rewriteValueARM_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMVN) v.AddArg(x) return true @@ -15087,6 +14659,7 @@ func rewriteValueARM_OpConstNil_0(v *Value) bool { } } func rewriteValueARM_OpCtz16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) @@ -15094,7 +14667,7 @@ func rewriteValueARM_OpCtz16_0(v *Value) bool { // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM <= 6) { break } @@ -15125,7 +14698,7 @@ func rewriteValueARM_OpCtz16_0(v *Value) bool { // result: (CLZ (RBIT (ORconst [0x10000] x))) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7) { break } @@ -15142,23 +14715,25 @@ func rewriteValueARM_OpCtz16_0(v *Value) bool { return false } func rewriteValueARM_OpCtz16NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz16NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueARM_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Ctz32 x) // cond: objabi.GOARM<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM <= 6) { break } @@ -15183,7 +14758,7 @@ func rewriteValueARM_OpCtz32_0(v *Value) bool { // result: (CLZ (RBIT x)) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7) { break } @@ -15197,16 +14772,18 @@ func rewriteValueARM_OpCtz32_0(v *Value) bool { return false } func rewriteValueARM_OpCtz32NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz32NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueARM_OpCtz8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) @@ -15214,7 +14791,7 @@ func rewriteValueARM_OpCtz8_0(v *Value) bool { // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM <= 6) { break } @@ -15245,7 +14822,7 @@ func rewriteValueARM_OpCtz8_0(v *Value) bool { // result: (CLZ (RBIT (ORconst [0x100] x))) for { t := v.Type - x := v.Args[0] + x := v_0 if !(objabi.GOARM == 7) { break } @@ -15262,123 +14839,136 @@ func rewriteValueARM_OpCtz8_0(v *Value) bool { return false } func rewriteValueARM_OpCtz8NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz8NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (MOVFW x) for { - x := 
v.Args[0] + x := v_0 v.reset(OpARMMOVFW) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32Fto32U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32U x) // result: (MOVFWU x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVFWU) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (MOVFD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVFD) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32Uto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Uto32F x) // result: (MOVWUF x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVWUF) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32Uto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Uto64F x) // result: (MOVWUD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVWUD) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (MOVWF x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVWF) v.AddArg(x) return true } } func rewriteValueARM_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // result: (MOVWD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVWD) v.AddArg(x) return true } } func rewriteValueARM_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (MOVDW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVDW) v.AddArg(x) return true } } func rewriteValueARM_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (MOVDF x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVDF) v.AddArg(x) return true } } func rewriteValueARM_OpCvt64Fto32U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32U x) // result: (MOVDWU x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVDWU) v.AddArg(x) return true } } func rewriteValueARM_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (Div32 (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpDiv32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -15390,13 +14980,15 @@ func rewriteValueARM_OpDiv16_0(v *Value) bool { } } func rewriteValueARM_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpDiv32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -15408,13 +15000,15 @@ func rewriteValueARM_OpDiv16u_0(v *Value) bool { } } func rewriteValueARM_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32 x y) // result: (SUB (XOR (Select0 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask (XOR x y))) (Signmask (XOR x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUB) v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) @@ -15460,11 +15054,13 @@ func rewriteValueARM_OpDiv32_0(v *Value) bool { } } func rewriteValueARM_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (DIVF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMDIVF) v.AddArg(x) v.AddArg(y) 
@@ -15472,13 +15068,15 @@ func rewriteValueARM_OpDiv32F_0(v *Value) bool { } } func rewriteValueARM_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32u x y) // result: (Select0 (CALLudiv x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) @@ -15489,11 +15087,13 @@ func rewriteValueARM_OpDiv32u_0(v *Value) bool { } } func rewriteValueARM_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (DIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMDIVD) v.AddArg(x) v.AddArg(y) @@ -15501,13 +15101,15 @@ func rewriteValueARM_OpDiv64F_0(v *Value) bool { } } func rewriteValueARM_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (Div32 (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpDiv32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -15519,13 +15121,15 @@ func rewriteValueARM_OpDiv8_0(v *Value) bool { } } func rewriteValueARM_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpDiv32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -15537,13 +15141,15 @@ func rewriteValueARM_OpDiv8u_0(v *Value) bool { } } func rewriteValueARM_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -15557,12 +15163,14 @@ func rewriteValueARM_OpEq16_0(v *Value) bool { } } func rewriteValueARM_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32 x y) // result: (Equal (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -15572,12 +15180,14 @@ func rewriteValueARM_OpEq32_0(v *Value) bool { } } func rewriteValueARM_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (Equal (CMPF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(x) @@ -15587,12 +15197,14 @@ func rewriteValueARM_OpEq32F_0(v *Value) bool { } } func rewriteValueARM_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (Equal (CMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(x) @@ -15602,13 +15214,15 @@ func rewriteValueARM_OpEq64F_0(v *Value) bool { } } func rewriteValueARM_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 
v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -15622,13 +15236,15 @@ func rewriteValueARM_OpEq8_0(v *Value) bool { } } func rewriteValueARM_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (XORconst [1] (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool) @@ -15639,12 +15255,14 @@ func rewriteValueARM_OpEqB_0(v *Value) bool { } } func rewriteValueARM_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqPtr x y) // result: (Equal (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -15654,12 +15272,15 @@ func rewriteValueARM_OpEqPtr_0(v *Value) bool { } } func rewriteValueARM_OpFMA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMA x y z) // result: (FMULAD z x y) for { - z := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + z := v_2 v.reset(OpARMFMULAD) v.AddArg(z) v.AddArg(x) @@ -15668,13 +15289,15 @@ func rewriteValueARM_OpFMA_0(v *Value) bool { } } func rewriteValueARM_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -15688,13 +15311,15 @@ func rewriteValueARM_OpGeq16_0(v *Value) bool { } } func rewriteValueARM_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -15708,12 +15333,14 @@ func rewriteValueARM_OpGeq16U_0(v *Value) bool { } } func rewriteValueARM_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32 x y) // result: (GreaterEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -15723,12 +15350,14 @@ func rewriteValueARM_OpGeq32_0(v *Value) bool { } } func rewriteValueARM_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (GreaterEqual (CMPF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(x) @@ -15738,12 +15367,14 @@ func rewriteValueARM_OpGeq32F_0(v *Value) bool { } } func rewriteValueARM_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32U x y) // result: (GreaterEqualU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -15753,12 +15384,14 @@ func rewriteValueARM_OpGeq32U_0(v *Value) bool { } } func rewriteValueARM_OpGeq64F_0(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (GreaterEqual (CMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(x) @@ -15768,13 +15401,15 @@ func rewriteValueARM_OpGeq64F_0(v *Value) bool { } } func rewriteValueARM_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -15788,13 +15423,15 @@ func rewriteValueARM_OpGeq8_0(v *Value) bool { } } func rewriteValueARM_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -15832,13 +15469,15 @@ func rewriteValueARM_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueARM_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -15852,13 +15491,15 @@ func rewriteValueARM_OpGreater16_0(v *Value) bool { } } func rewriteValueARM_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -15872,12 +15513,14 @@ func rewriteValueARM_OpGreater16U_0(v *Value) bool { } } func rewriteValueARM_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32 x y) // result: (GreaterThan (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -15887,12 +15530,14 @@ func rewriteValueARM_OpGreater32_0(v *Value) bool { } } func rewriteValueARM_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (GreaterThan (CMPF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(x) @@ -15902,12 +15547,14 @@ func rewriteValueARM_OpGreater32F_0(v *Value) bool { } } func rewriteValueARM_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32U x y) // result: (GreaterThanU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -15917,12 +15564,14 @@ func rewriteValueARM_OpGreater32U_0(v *Value) bool { } } func 
rewriteValueARM_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (GreaterThan (CMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(x) @@ -15932,13 +15581,15 @@ func rewriteValueARM_OpGreater64F_0(v *Value) bool { } } func rewriteValueARM_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -15952,13 +15603,15 @@ func rewriteValueARM_OpGreater8_0(v *Value) bool { } } func rewriteValueARM_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -15972,11 +15625,13 @@ func rewriteValueARM_OpGreater8U_0(v *Value) bool { } } func rewriteValueARM_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul32 x y) // result: (HMUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMHMUL) v.AddArg(x) v.AddArg(y) @@ -15984,11 +15639,13 @@ func rewriteValueARM_OpHmul32_0(v *Value) bool { } } func rewriteValueARM_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul32u x y) // result: (HMULU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMHMULU) v.AddArg(x) v.AddArg(y) @@ -15996,12 +15653,14 @@ func rewriteValueARM_OpHmul32u_0(v *Value) bool { } } func rewriteValueARM_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpARMCALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -16010,12 +15669,14 @@ func rewriteValueARM_OpInterCall_0(v *Value) bool { } } func rewriteValueARM_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsInBounds idx len) // result: (LessThanU (CMP idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(idx) @@ -16025,11 +15686,12 @@ func rewriteValueARM_OpIsInBounds_0(v *Value) bool { } } func rewriteValueARM_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (IsNonNil ptr) // result: (NotEqual (CMPconst [0] ptr)) for { - ptr := v.Args[0] + ptr := v_0 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = 0 @@ -16039,12 +15701,14 @@ func rewriteValueARM_OpIsNonNil_0(v *Value) bool { } } func rewriteValueARM_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsSliceInBounds idx len) // result: (LessEqualU (CMP idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) 
v0.AddArg(idx) @@ -16054,13 +15718,15 @@ func rewriteValueARM_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueARM_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -16074,13 +15740,15 @@ func rewriteValueARM_OpLeq16_0(v *Value) bool { } } func rewriteValueARM_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -16094,12 +15762,14 @@ func rewriteValueARM_OpLeq16U_0(v *Value) bool { } } func rewriteValueARM_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32 x y) // result: (LessEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -16109,12 +15779,14 @@ func rewriteValueARM_OpLeq32_0(v *Value) bool { } } func rewriteValueARM_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) // result: (GreaterEqual (CMPF y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(y) @@ -16124,12 +15796,14 @@ func rewriteValueARM_OpLeq32F_0(v *Value) bool { } } func rewriteValueARM_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32U x y) // result: (LessEqualU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -16139,12 +15813,14 @@ func rewriteValueARM_OpLeq32U_0(v *Value) bool { } } func rewriteValueARM_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (GreaterEqual (CMPD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(y) @@ -16154,13 +15830,15 @@ func rewriteValueARM_OpLeq64F_0(v *Value) bool { } } func rewriteValueARM_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -16174,13 +15852,15 @@ func rewriteValueARM_OpLeq8_0(v *Value) bool { } } func rewriteValueARM_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -16194,13 +15874,15 @@ func 
rewriteValueARM_OpLeq8U_0(v *Value) bool { } } func rewriteValueARM_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -16214,13 +15896,15 @@ func rewriteValueARM_OpLess16_0(v *Value) bool { } } func rewriteValueARM_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -16234,12 +15918,14 @@ func rewriteValueARM_OpLess16U_0(v *Value) bool { } } func rewriteValueARM_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32 x y) // result: (LessThan (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -16249,12 +15935,14 @@ func rewriteValueARM_OpLess32_0(v *Value) bool { } } func rewriteValueARM_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (GreaterThan (CMPF y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(y) @@ -16264,12 +15952,14 @@ func rewriteValueARM_OpLess32F_0(v *Value) bool { } } func rewriteValueARM_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32U x y) // result: (LessThanU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -16279,12 +15969,14 @@ func rewriteValueARM_OpLess32U_0(v *Value) bool { } } func rewriteValueARM_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (GreaterThan (CMPD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMGreaterThan) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(y) @@ -16294,13 +15986,15 @@ func rewriteValueARM_OpLess64F_0(v *Value) bool { } } func rewriteValueARM_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -16314,13 +16008,15 @@ func rewriteValueARM_OpLess8_0(v *Value) bool { } } func rewriteValueARM_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMLessThanU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -16334,13 +16030,15 @@ func rewriteValueARM_OpLess8U_0(v *Value) bool { } } func 
rewriteValueARM_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: t.IsBoolean() // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean()) { break } @@ -16354,8 +16052,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -16369,8 +16067,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && !isSigned(t)) { break } @@ -16384,8 +16082,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -16399,8 +16097,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVHUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -16414,8 +16112,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) || isPtr(t)) { break } @@ -16429,8 +16127,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVFload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -16444,8 +16142,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -16457,12 +16155,12 @@ func rewriteValueARM_OpLoad_0(v *Value) bool { return false } func rewriteValueARM_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVWaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpARMMOVWaddr) v.Aux = sym v.AddArg(base) @@ -16470,13 +16168,15 @@ func rewriteValueARM_OpLocalAddr_0(v *Value) bool { } } func rewriteValueARM_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // result: (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -16495,12 +16195,14 @@ func rewriteValueARM_OpLsh16x16_0(v *Value) bool { } } func rewriteValueARM_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x32 x y) // result: (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -16515,13 +16217,13 @@ func rewriteValueARM_OpLsh16x32_0(v *Value) bool { } } func rewriteValueARM_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16538,8 +16240,6 @@ func rewriteValueARM_OpLsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (Const16 
[0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16554,13 +16254,15 @@ func rewriteValueARM_OpLsh16x64_0(v *Value) bool { return false } func rewriteValueARM_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // result: (SLL x (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSLL) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -16570,13 +16272,15 @@ func rewriteValueARM_OpLsh16x8_0(v *Value) bool { } } func rewriteValueARM_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // result: (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -16595,12 +16299,14 @@ func rewriteValueARM_OpLsh32x16_0(v *Value) bool { } } func rewriteValueARM_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x32 x y) // result: (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -16615,13 +16321,13 @@ func rewriteValueARM_OpLsh32x32_0(v *Value) bool { } } func rewriteValueARM_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16638,8 +16344,6 @@ func rewriteValueARM_OpLsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16654,13 +16358,15 @@ func rewriteValueARM_OpLsh32x64_0(v *Value) bool { return false } func rewriteValueARM_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // result: (SLL x (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSLL) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -16670,13 +16376,15 @@ func rewriteValueARM_OpLsh32x8_0(v *Value) bool { } } func rewriteValueARM_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // result: (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -16695,12 +16403,14 @@ func rewriteValueARM_OpLsh8x16_0(v *Value) bool { } } func rewriteValueARM_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x32 x y) // result: (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) @@ -16715,13 +16425,13 @@ func rewriteValueARM_OpLsh8x32_0(v *Value) bool { } } func rewriteValueARM_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := 
v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16738,8 +16448,6 @@ func rewriteValueARM_OpLsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (Const8 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16754,13 +16462,15 @@ func rewriteValueARM_OpLsh8x64_0(v *Value) bool { return false } func rewriteValueARM_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // result: (SLL x (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSLL) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -16770,13 +16480,15 @@ func rewriteValueARM_OpLsh8x8_0(v *Value) bool { } } func rewriteValueARM_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -16788,13 +16500,15 @@ func rewriteValueARM_OpMod16_0(v *Value) bool { } } func rewriteValueARM_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -16806,13 +16520,15 @@ func rewriteValueARM_OpMod16u_0(v *Value) bool { } } func rewriteValueARM_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) // result: (SUB (XOR (Select1 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUB) v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) @@ -16852,13 +16568,15 @@ func rewriteValueARM_OpMod32_0(v *Value) bool { } } func rewriteValueARM_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (Select1 (CALLudiv x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) @@ -16869,13 +16587,15 @@ func rewriteValueARM_OpMod32u_0(v *Value) bool { } } func rewriteValueARM_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -16887,13 +16607,15 @@ func rewriteValueARM_OpMod8_0(v *Value) bool { } } func rewriteValueARM_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -16905,6 +16627,9 @@ func rewriteValueARM_OpMod8u_0(v *Value) bool { } } func rewriteValueARM_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -16914,7 +16639,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -16926,9 +16651,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARMMOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) @@ -16946,9 +16671,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -16967,9 +16692,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 1 v.AddArg(dst) @@ -16996,9 +16721,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -17019,9 +16744,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -17049,9 +16774,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 3 v.AddArg(dst) @@ -17094,9 +16819,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARMMOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -17130,9 +16855,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { break } @@ -17149,9 +16874,9 @@ func rewriteValueARM_OpMove_0(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { break } @@ -17169,11 +16894,13 @@ func rewriteValueARM_OpMove_0(v *Value) bool { return false } func rewriteValueARM_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMMUL) v.AddArg(x) v.AddArg(y) @@ -17181,11 +16908,13 @@ func rewriteValueARM_OpMul16_0(v *Value) bool { } } func rewriteValueARM_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMMUL) v.AddArg(x) v.AddArg(y) @@ -17193,11 +16922,13 @@ func rewriteValueARM_OpMul32_0(v *Value) bool { } } func rewriteValueARM_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (MULF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMMULF) v.AddArg(x) v.AddArg(y) @@ -17205,11 +16936,13 @@ func 
rewriteValueARM_OpMul32F_0(v *Value) bool { } } func rewriteValueARM_OpMul32uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32uhilo x y) // result: (MULLU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMMULLU) v.AddArg(x) v.AddArg(y) @@ -17217,11 +16950,13 @@ func rewriteValueARM_OpMul32uhilo_0(v *Value) bool { } } func rewriteValueARM_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (MULD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMMULD) v.AddArg(x) v.AddArg(y) @@ -17229,11 +16964,13 @@ func rewriteValueARM_OpMul64F_0(v *Value) bool { } } func rewriteValueARM_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMMUL) v.AddArg(x) v.AddArg(y) @@ -17241,10 +16978,11 @@ func rewriteValueARM_OpMul8_0(v *Value) bool { } } func rewriteValueARM_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (RSBconst [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMRSBconst) v.AuxInt = 0 v.AddArg(x) @@ -17252,10 +16990,11 @@ func rewriteValueARM_OpNeg16_0(v *Value) bool { } } func rewriteValueARM_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (RSBconst [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMRSBconst) v.AuxInt = 0 v.AddArg(x) @@ -17263,30 +17002,33 @@ func rewriteValueARM_OpNeg32_0(v *Value) bool { } } func rewriteValueARM_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (NEGF x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMNEGF) v.AddArg(x) return true } } func rewriteValueARM_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (NEGD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMNEGD) v.AddArg(x) return true } } func rewriteValueARM_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (RSBconst [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMRSBconst) v.AuxInt = 0 v.AddArg(x) @@ -17294,13 +17036,15 @@ func rewriteValueARM_OpNeg8_0(v *Value) bool { } } func rewriteValueARM_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -17314,12 +17058,14 @@ func rewriteValueARM_OpNeq16_0(v *Value) bool { } } func rewriteValueARM_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32 x y) // result: (NotEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -17329,12 +17075,14 @@ func rewriteValueARM_OpNeq32_0(v *Value) bool { } } func rewriteValueARM_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32F x y) // result: (NotEqual (CMPF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(x) @@ -17344,12 +17092,14 @@ func rewriteValueARM_OpNeq32F_0(v *Value) bool { } } func rewriteValueARM_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block 
// match: (Neq64F x y) // result: (NotEqual (CMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(x) @@ -17359,13 +17109,15 @@ func rewriteValueARM_OpNeq64F_0(v *Value) bool { } } func rewriteValueARM_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -17379,11 +17131,13 @@ func rewriteValueARM_OpNeq8_0(v *Value) bool { } } func rewriteValueARM_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMXOR) v.AddArg(x) v.AddArg(y) @@ -17391,12 +17145,14 @@ func rewriteValueARM_OpNeqB_0(v *Value) bool { } } func rewriteValueARM_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqPtr x y) // result: (NotEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMNotEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -17406,11 +17162,13 @@ func rewriteValueARM_OpNeqPtr_0(v *Value) bool { } } func rewriteValueARM_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARMLoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -17418,10 +17176,11 @@ func rewriteValueARM_OpNilCheck_0(v *Value) bool { } } func rewriteValueARM_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMXORconst) v.AuxInt = 1 v.AddArg(x) @@ -17429,11 +17188,12 @@ func rewriteValueARM_OpNot_0(v *Value) bool { } } func rewriteValueARM_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr [off] ptr:(SP)) // result: (MOVWaddr [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if ptr.Op != OpSP { break } @@ -17446,7 +17206,7 @@ func rewriteValueARM_OpOffPtr_0(v *Value) bool { // result: (ADDconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpARMADDconst) v.AuxInt = off v.AddArg(ptr) @@ -17454,11 +17214,13 @@ func rewriteValueARM_OpOffPtr_0(v *Value) bool { } } func rewriteValueARM_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) @@ -17466,11 +17228,13 @@ func rewriteValueARM_OpOr16_0(v *Value) bool { } } func rewriteValueARM_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) @@ -17478,11 +17242,13 @@ func rewriteValueARM_OpOr32_0(v *Value) bool { } } func rewriteValueARM_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) @@ -17490,11 +17256,13 @@ func rewriteValueARM_OpOr8_0(v *Value) bool { } } func rewriteValueARM_OpOrB_0(v *Value) bool { 
+ v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) @@ -17502,14 +17270,17 @@ func rewriteValueARM_OpOrB_0(v *Value) bool { } } func rewriteValueARM_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -17525,9 +17296,9 @@ func rewriteValueARM_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -17543,9 +17314,9 @@ func rewriteValueARM_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -17559,15 +17330,19 @@ func rewriteValueARM_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueARM_OpPanicExtend_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicExtend [kind] hi lo y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicExtendA [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 0) { break } @@ -17584,10 +17359,10 @@ func rewriteValueARM_OpPanicExtend_0(v *Value) bool { // result: (LoweredPanicExtendB [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 1) { break } @@ -17604,10 +17379,10 @@ func rewriteValueARM_OpPanicExtend_0(v *Value) bool { // result: (LoweredPanicExtendC [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 2) { break } @@ -17622,15 +17397,15 @@ func rewriteValueARM_OpPanicExtend_0(v *Value) bool { return false } func rewriteValueARM_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (MOVWconst [c])) // result: (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -17653,13 +17428,13 @@ func rewriteValueARM_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueARM_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RotateLeft32 x (MOVWconst [c])) // result: (SRRconst [-c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -17672,8 +17447,8 @@ func rewriteValueARM_OpRotateLeft32_0(v *Value) bool { // match: (RotateLeft32 x y) // result: (SRR x (RSBconst [0] y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRR) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type) @@ -17684,15 +17459,15 @@ func rewriteValueARM_OpRotateLeft32_0(v *Value) 
bool { } } func rewriteValueARM_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVWconst [c])) // result: (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARMMOVWconst { break } @@ -17715,10 +17490,11 @@ func rewriteValueARM_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueARM_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17726,10 +17502,11 @@ func rewriteValueARM_OpRound32F_0(v *Value) bool { } } func rewriteValueARM_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17737,13 +17514,15 @@ func rewriteValueARM_OpRound64F_0(v *Value) bool { } } func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // result: (CMOVWHSconst (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -17764,13 +17543,15 @@ func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // result: (CMOVWHSconst (SRL (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -17787,15 +17568,15 @@ func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SRLconst (SLLconst x [16]) [c+16]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -17815,8 +17596,6 @@ func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (Const16 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -17831,13 +17610,15 @@ func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool { return false } func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRL) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -17849,13 +17630,15 @@ func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueARM_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -17873,13 +17656,15 @@ func rewriteValueARM_OpRsh16x16_0(v *Value) bool { } } func rewriteValueARM_OpRsh16x32_0(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -17893,15 +17678,15 @@ func rewriteValueARM_OpRsh16x32_0(v *Value) bool { } } func rewriteValueARM_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SRAconst (SLLconst x [16]) [c+16]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -17921,9 +17706,7 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (SRAconst (SLLconst x [16]) [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -17942,13 +17725,15 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool { return false } func rewriteValueARM_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -17960,13 +17745,15 @@ func rewriteValueARM_OpRsh16x8_0(v *Value) bool { } } func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // result: (CMOVWHSconst (SRL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -17985,12 +17772,14 @@ func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueARM_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux32 x y) // result: (CMOVWHSconst (SRL x y) (CMPconst [256] y) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -18005,13 +17794,13 @@ func rewriteValueARM_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh32Ux64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SRLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18028,8 +17817,6 @@ func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18044,13 +17831,15 @@ func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { return false } func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // result: (SRL x (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRL) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -18060,13 +17849,15 @@ func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueARM_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // 
result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRAcond) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -18082,12 +17873,14 @@ func rewriteValueARM_OpRsh32x16_0(v *Value) bool { } } func rewriteValueARM_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x32 x y) // result: (SRAcond x y (CMPconst [256] y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRAcond) v.AddArg(x) v.AddArg(y) @@ -18099,13 +17892,13 @@ func rewriteValueARM_OpRsh32x32_0(v *Value) bool { } } func rewriteValueARM_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SRAconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18122,9 +17915,7 @@ func rewriteValueARM_OpRsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (SRAconst x [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18140,13 +17931,15 @@ func rewriteValueARM_OpRsh32x64_0(v *Value) bool { return false } func rewriteValueARM_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // result: (SRA x (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -18156,13 +17949,15 @@ func rewriteValueARM_OpRsh32x8_0(v *Value) bool { } } func rewriteValueARM_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // result: (CMOVWHSconst (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -18183,13 +17978,15 @@ func rewriteValueARM_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // result: (CMOVWHSconst (SRL (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) @@ -18206,15 +18003,15 @@ func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SRLconst (SLLconst x [24]) [c+24]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18234,8 +18031,6 @@ func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (Const8 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18250,13 +18045,15 @@ func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool { return false } func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRL) v0 := 
b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -18268,13 +18065,15 @@ func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueARM_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -18292,13 +18091,15 @@ func rewriteValueARM_OpRsh8x16_0(v *Value) bool { } } func rewriteValueARM_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -18312,15 +18113,15 @@ func rewriteValueARM_OpRsh8x32_0(v *Value) bool { } } func rewriteValueARM_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SRAconst (SLLconst x [24]) [c+24]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18340,9 +18141,7 @@ func rewriteValueARM_OpRsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (SRAconst (SLLconst x [24]) [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18361,13 +18160,15 @@ func rewriteValueARM_OpRsh8x64_0(v *Value) bool { return false } func rewriteValueARM_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSRA) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -18379,10 +18180,10 @@ func rewriteValueARM_OpRsh8x8_0(v *Value) bool { } } func rewriteValueARM_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] // match: (Select0 (CALLudiv x (MOVWconst [1]))) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpARMCALLudiv { break } @@ -18401,7 +18202,6 @@ func rewriteValueARM_OpSelect0_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SRLconst [log2(c)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARMCALLudiv { break } @@ -18423,7 +18223,6 @@ func rewriteValueARM_OpSelect0_0(v *Value) bool { // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(uint32(c)/uint32(d)))]) for { - v_0 := v.Args[0] if v_0.Op != OpARMCALLudiv { break } @@ -18445,10 +18244,10 @@ func rewriteValueARM_OpSelect0_0(v *Value) bool { return false } func rewriteValueARM_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] // match: (Select1 (CALLudiv _ (MOVWconst [1]))) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARMCALLudiv { break } @@ -18465,7 +18264,6 @@ func rewriteValueARM_OpSelect1_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ANDconst [c-1] x) for { - v_0 := v.Args[0] if v_0.Op != OpARMCALLudiv { break } @@ -18487,7 +18285,6 @@ func rewriteValueARM_OpSelect1_0(v *Value) bool { // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(uint32(c)%uint32(d)))]) for { - v_0 := v.Args[0] if v_0.Op != OpARMCALLudiv { break } @@ 
-18509,40 +18306,44 @@ func rewriteValueARM_OpSelect1_0(v *Value) bool { return false } func rewriteValueARM_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVHreg) v.AddArg(x) return true } } func rewriteValueARM_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVBreg) v.AddArg(x) return true } } func rewriteValueARM_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVBreg) v.AddArg(x) return true } } func rewriteValueARM_OpSignmask_0(v *Value) bool { + v_0 := v.Args[0] // match: (Signmask x) // result: (SRAconst x [31]) for { - x := v.Args[0] + x := v_0 v.reset(OpARMSRAconst) v.AuxInt = 31 v.AddArg(x) @@ -18550,12 +18351,13 @@ func rewriteValueARM_OpSignmask_0(v *Value) bool { } } func rewriteValueARM_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SRAconst (RSBconst [0] x) [31]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARMSRAconst) v.AuxInt = 31 v0 := b.NewValue0(v.Pos, OpARMRSBconst, t) @@ -18566,22 +18368,24 @@ func rewriteValueARM_OpSlicemask_0(v *Value) bool { } } func rewriteValueARM_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (SQRTD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMSQRTD) v.AddArg(x) return true } } func rewriteValueARM_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpARMCALLstatic) v.AuxInt = argwid v.Aux = target @@ -18590,14 +18394,17 @@ func rewriteValueARM_OpStaticCall_0(v *Value) bool { } } func rewriteValueARM_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 1 // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -18612,9 +18419,9 @@ func rewriteValueARM_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -18629,9 +18436,9 @@ func rewriteValueARM_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { break } @@ -18646,9 +18453,9 @@ func rewriteValueARM_OpStore_0(v *Value) bool { // result: (MOVFstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -18663,9 +18470,9 @@ func rewriteValueARM_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -18678,11 +18485,13 @@ func rewriteValueARM_OpStore_0(v *Value) bool { return false } func rewriteValueARM_OpSub16_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUB) v.AddArg(x) v.AddArg(y) @@ -18690,11 +18499,13 @@ func rewriteValueARM_OpSub16_0(v *Value) bool { } } func rewriteValueARM_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUB) v.AddArg(x) v.AddArg(y) @@ -18702,11 +18513,13 @@ func rewriteValueARM_OpSub32_0(v *Value) bool { } } func rewriteValueARM_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (SUBF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUBF) v.AddArg(x) v.AddArg(y) @@ -18714,11 +18527,13 @@ func rewriteValueARM_OpSub32F_0(v *Value) bool { } } func rewriteValueARM_OpSub32carry_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32carry x y) // result: (SUBS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUBS) v.AddArg(x) v.AddArg(y) @@ -18726,12 +18541,15 @@ func rewriteValueARM_OpSub32carry_0(v *Value) bool { } } func rewriteValueARM_OpSub32withcarry_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32withcarry x y c) // result: (SBC x y c) for { - c := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + c := v_2 v.reset(OpARMSBC) v.AddArg(x) v.AddArg(y) @@ -18740,11 +18558,13 @@ func rewriteValueARM_OpSub32withcarry_0(v *Value) bool { } } func rewriteValueARM_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (SUBD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUBD) v.AddArg(x) v.AddArg(y) @@ -18752,11 +18572,13 @@ func rewriteValueARM_OpSub64F_0(v *Value) bool { } } func rewriteValueARM_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUB) v.AddArg(x) v.AddArg(y) @@ -18764,11 +18586,13 @@ func rewriteValueARM_OpSub8_0(v *Value) bool { } } func rewriteValueARM_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMSUB) v.AddArg(x) v.AddArg(y) @@ -18776,10 +18600,11 @@ func rewriteValueARM_OpSubPtr_0(v *Value) bool { } } func rewriteValueARM_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -18787,10 +18612,11 @@ func rewriteValueARM_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueARM_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -18798,10 +18624,11 @@ func rewriteValueARM_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueARM_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -18809,13 +18636,16 @@ func rewriteValueARM_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueARM_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr 
srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpARMLoweredWB) v.Aux = fn v.AddArg(destptr) @@ -18825,11 +18655,13 @@ func rewriteValueARM_OpWB_0(v *Value) bool { } } func rewriteValueARM_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMXOR) v.AddArg(x) v.AddArg(y) @@ -18837,11 +18669,13 @@ func rewriteValueARM_OpXor16_0(v *Value) bool { } } func rewriteValueARM_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMXOR) v.AddArg(x) v.AddArg(y) @@ -18849,11 +18683,13 @@ func rewriteValueARM_OpXor32_0(v *Value) bool { } } func rewriteValueARM_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARMXOR) v.AddArg(x) v.AddArg(y) @@ -18861,6 +18697,8 @@ func rewriteValueARM_OpXor8_0(v *Value) bool { } } func rewriteValueARM_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -18870,7 +18708,7 @@ func rewriteValueARM_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -18882,8 +18720,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARMMOVBstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) @@ -18900,8 +18738,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -18919,8 +18757,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 1 v.AddArg(ptr) @@ -18945,8 +18783,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -18966,8 +18804,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -18993,8 +18831,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 3 v.AddArg(ptr) @@ -19031,8 +18869,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARMMOVBstore) v.AuxInt = 2 v.AddArg(ptr) @@ -19062,8 +18900,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { break } @@ -19082,8 +18920,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { 
break } @@ -19103,42 +18941,46 @@ func rewriteValueARM_OpZero_0(v *Value) bool { return false } func rewriteValueARM_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVHUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVHUreg) v.AddArg(x) return true } } func rewriteValueARM_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVBUreg) v.AddArg(x) return true } } func rewriteValueARM_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARMMOVBUreg) v.AddArg(x) return true } } func rewriteValueARM_OpZeromask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zeromask x) // result: (SRAconst (RSBshiftRL x x [1]) [31]) for { - x := v.Args[0] + x := v_0 v.reset(OpARMSRAconst) v.AuxInt = 31 v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) @@ -19440,9 +19282,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -19676,9 +19520,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -19884,9 +19730,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -20366,9 +20214,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -20602,9 +20452,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -20810,9 +20662,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -21293,9 +21147,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -21501,9 +21357,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -21737,9 +21595,11 @@ func 
rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -22321,9 +22181,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -22557,9 +22419,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -22765,9 +22629,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -23248,9 +23114,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -23484,9 +23352,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -23692,9 +23562,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -24333,9 +24205,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -24569,9 +24443,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } @@ -24777,9 +24653,11 @@ func rewriteBlockARM(b *Block) bool { break } _ = l.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := l.Args[_i0] - y := l.Args[1^_i0] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 if !(l.Uses == 1) { continue } diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 7fab25a579..87e15d911a 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -957,15 +957,16 @@ func rewriteValueARM64(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADCSflags_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // 
match: (ADCSflags x y (Select1 (ADDSconstflags [-1] (ADCzerocarry c)))) // result: (ADCSflags x y c) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { break } @@ -987,10 +988,8 @@ func rewriteValueARM64_OpARM64ADCSflags_0(v *Value) bool { // match: (ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) // result: (ADDSflags x y) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { break } @@ -1010,15 +1009,15 @@ func rewriteValueARM64_OpARM64ADCSflags_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADD x (MOVDconst [c])) // result: (ADDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -1034,10 +1033,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: l.Uses==1 && clobber(l) // result: (MADD a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 if l.Op != OpARM64MUL { continue } @@ -1058,10 +1056,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: l.Uses==1 && clobber(l) // result: (MSUB a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 if l.Op != OpARM64MNEG { continue } @@ -1082,10 +1079,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) // result: (MADDW a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 if l.Op != OpARM64MULW { continue } @@ -1106,10 +1102,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) // result: (MSUBW a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - l := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 if l.Op != OpARM64MNEGW { continue } @@ -1129,10 +1124,8 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // match: (ADD x (NEG y)) // result: (SUB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64NEG { continue } @@ -1148,10 +1141,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ADDshiftLL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { continue } @@ -1172,10 +1164,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ADDshiftRL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if 
x1.Op != OpARM64SRLconst { continue } @@ -1196,10 +1187,9 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ADDshiftRA x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { continue } @@ -1220,9 +1210,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x (NEG y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SLL { continue } @@ -1237,7 +1225,6 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } @@ -1293,15 +1280,15 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADD (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { continue } @@ -1316,7 +1303,6 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } @@ -1371,9 +1357,7 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x (NEG y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SLL { continue } @@ -1388,7 +1372,6 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } @@ -1446,9 +1429,7 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { continue } @@ -1467,7 +1448,6 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } @@ -1521,11 +1501,11 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) // result: (MOVDaddr [off1+off2] {sym} ptr) for { off1 := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } @@ -1544,7 +1524,7 @@ func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -1554,7 +1534,6 @@ func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { // result: (MOVDconst [c+d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -1567,7 +1546,6 @@ func 
rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { // result: (ADDconst [c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } @@ -1582,7 +1560,6 @@ func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { // result: (ADDconst [c-d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SUBconst { break } @@ -1596,18 +1573,19 @@ func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDshiftLL (MOVDconst [c]) x [d]) // result: (ADDconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) @@ -1620,9 +1598,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { // result: (ADDconst x [int64(uint64(c)< [8] (UBFX [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { break } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) || x != v_0.Args[0] { + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARM64REV16W) @@ -1685,12 +1661,11 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { // result: (EXTRconst [64-c] x2 x) for { c := v.AuxInt - x2 := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c { break } x := v_0.Args[0] + x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c v.AddArg(x2) @@ -1703,13 +1678,12 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { for { t := v.Type c := v.AuxInt - x2 := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64UBFX { break } bfc := v_0.AuxInt x := v_0.Args[0] + x2 := v_1 if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } @@ -1722,17 +1696,18 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADDshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftRA (MOVDconst [c]) x [d]) // result: (ADDconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) @@ -1745,9 +1720,7 @@ func rewriteValueARM64_OpARM64ADDshiftRA_0(v *Value) bool { // result: (ADDconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -1760,17 +1733,18 @@ func rewriteValueARM64_OpARM64ADDshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDshiftRL (MOVDconst [c]) x [d]) // result: (ADDconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) @@ -1783,9 +1757,7 @@ func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { // result: (ADDconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 
:= v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -1799,9 +1771,11 @@ func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { // result: (RORconst [ c] x) for { c := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c || x != v_0.Args[0] { + if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c { + break + } + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARM64RORconst) @@ -1815,13 +1789,10 @@ func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { for { t := v.Type c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c { break } x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) { break } @@ -1833,13 +1804,13 @@ func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64AND_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AND x (MOVDconst [c])) // result: (ANDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -1854,8 +1825,8 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // match: (AND x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -1866,10 +1837,8 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // match: (AND x (MVN y)) // result: (BIC x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MVN { continue } @@ -1885,10 +1854,9 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ANDshiftLL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { continue } @@ -1909,10 +1877,9 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ANDshiftRL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { continue } @@ -1933,10 +1900,9 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ANDshiftRA x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { continue } @@ -1956,6 +1922,7 @@ func rewriteValueARM64_OpARM64AND_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDconst [0] _) // result: (MOVDconst [0]) for { @@ -1972,7 +1939,7 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -1982,7 +1949,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (MOVDconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -1995,7 +1961,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (ANDconst [c&d] 
x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } @@ -2010,7 +1975,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (ANDconst [c&(1<<32-1)] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVWUreg { break } @@ -2024,7 +1988,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (ANDconst [c&(1<<16-1)] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVHUreg { break } @@ -2038,7 +2001,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (ANDconst [c&(1<<8-1)] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVBUreg { break } @@ -2053,7 +2015,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) for { ac := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -2072,7 +2033,6 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) for { ac := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } @@ -2089,17 +2049,18 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ANDshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftLL (MOVDconst [c]) x [d]) // result: (ANDconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) @@ -2112,9 +2073,7 @@ func rewriteValueARM64_OpARM64ANDshiftLL_0(v *Value) bool { // result: (ANDconst x [int64(uint64(c)< x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) @@ -2170,9 +2129,7 @@ func rewriteValueARM64_OpARM64ANDshiftRA_0(v *Value) bool { // result: (ANDconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2187,9 +2144,8 @@ func rewriteValueARM64_OpARM64ANDshiftRA_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARM64SRAconst { break } @@ -2205,17 +2161,18 @@ func rewriteValueARM64_OpARM64ANDshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ANDshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDshiftRL (MOVDconst [c]) x [d]) // result: (ANDconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) @@ -2228,9 +2185,7 @@ func rewriteValueARM64_OpARM64ANDshiftRL_0(v *Value) bool { // result: (ANDconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2245,9 +2200,8 @@ func rewriteValueARM64_OpARM64ANDshiftRL_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARM64SRLconst { break } @@ -2263,12 +2217,12 @@ func rewriteValueARM64_OpARM64ANDshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64BIC_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BIC x (MOVDconst [c])) // result: (ANDconst [^c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2281,8 +2235,8 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { // match: (BIC x x) // result: (MOVDconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARM64MOVDconst) @@ -2293,9 +2247,8 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (BICshiftLL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { break } @@ -2314,9 +2267,8 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (BICshiftRL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { break } @@ -2335,9 +2287,8 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (BICshiftRA x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { break } @@ -2355,13 +2306,13 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64BICshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BICshiftLL x (MOVDconst [c]) [d]) // result: (ANDconst x [^int64(uint64(c)<>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2414,9 +2363,7 @@ func rewriteValueARM64_OpARM64BICshiftRA_0(v *Value) bool { // result: (MOVDconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRAconst { break } @@ -2431,13 +2378,13 @@ func rewriteValueARM64_OpARM64BICshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64BICshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (BICshiftRL x (MOVDconst [c]) [d]) // result: (ANDconst x [^int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2452,9 +2399,7 @@ func rewriteValueARM64_OpARM64BICshiftRL_0(v *Value) bool { // result: (MOVDconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRLconst { break } @@ -2469,13 +2414,13 @@ func rewriteValueARM64_OpARM64BICshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMN x (MOVDconst [c])) // result: (CMNconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -2491,10 +2436,9 @@ func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (CMNshiftLL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { continue } @@ -2515,10 +2459,9 @@ func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (CMNshiftRL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 
:= v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { continue } @@ -2539,10 +2482,9 @@ func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (CMNshiftRA x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { continue } @@ -2562,13 +2504,13 @@ func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMNW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMNW x (MOVDconst [c])) // result: (CMNWconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -2583,12 +2525,12 @@ func rewriteValueARM64_OpARM64CMNW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMNWconst (MOVDconst [x]) [y]) // cond: int32(x)==int32(-y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2604,7 +2546,6 @@ func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2620,7 +2561,6 @@ func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2636,7 +2576,6 @@ func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2652,7 +2591,6 @@ func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2666,12 +2604,12 @@ func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMNconst (MOVDconst [x]) [y]) // cond: int64(x)==int64(-y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2687,7 +2625,6 @@ func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2703,7 +2640,6 @@ func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2719,7 +2655,6 @@ func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2735,7 +2670,6 @@ func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -2749,17 +2683,18 @@ func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMNshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftLL (MOVDconst [c]) x [d]) // result: (CMNconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 
v.reset(OpARM64CMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) @@ -2772,9 +2707,7 @@ func rewriteValueARM64_OpARM64CMNshiftLL_0(v *Value) bool { // result: (CMNconst x [int64(uint64(c)< x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64CMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) @@ -2810,9 +2744,7 @@ func rewriteValueARM64_OpARM64CMNshiftRA_0(v *Value) bool { // result: (CMNconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2825,17 +2757,18 @@ func rewriteValueARM64_OpARM64CMNshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMNshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMNshiftRL (MOVDconst [c]) x [d]) // result: (CMNconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64CMNconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) @@ -2848,9 +2781,7 @@ func rewriteValueARM64_OpARM64CMNshiftRL_0(v *Value) bool { // result: (CMNconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2863,13 +2794,13 @@ func rewriteValueARM64_OpARM64CMNshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMP x (MOVDconst [c])) // result: (CMPconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -2882,12 +2813,11 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // match: (CMP (MOVDconst [c]) x) // result: (InvertFlags (CMPconst [c] x)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = c @@ -2899,9 +2829,8 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (CMPshiftLL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { break } @@ -2920,13 +2849,13 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // cond: clobberIfDead(x0) // result: (InvertFlags (CMPshiftLL x1 y [c])) for { - x1 := v.Args[1] - x0 := v.Args[0] + x0 := v_0 if x0.Op != OpARM64SLLconst { break } c := x0.AuxInt y := x0.Args[0] + x1 := v_1 if !(clobberIfDead(x0)) { break } @@ -2942,9 +2871,8 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (CMPshiftRL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { break } @@ -2963,13 +2891,13 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // cond: clobberIfDead(x0) // result: (InvertFlags (CMPshiftRL x1 y [c])) for { - x1 := v.Args[1] - x0 := v.Args[0] + x0 := v_0 if x0.Op != OpARM64SRLconst { break } c := x0.AuxInt y := x0.Args[0] + x1 := v_1 if !(clobberIfDead(x0)) { break } @@ -2985,9 +2913,8 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (CMPshiftRA x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op 
!= OpARM64SRAconst { break } @@ -3006,13 +2933,13 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { // cond: clobberIfDead(x0) // result: (InvertFlags (CMPshiftRA x1 y [c])) for { - x1 := v.Args[1] - x0 := v.Args[0] + x0 := v_0 if x0.Op != OpARM64SRAconst { break } c := x0.AuxInt y := x0.Args[0] + x1 := v_1 if !(clobberIfDead(x0)) { break } @@ -3027,13 +2954,13 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMPW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVDconst [c])) // result: (CMPWconst [int64(int32(c))] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -3046,12 +2973,11 @@ func rewriteValueARM64_OpARM64CMPW_0(v *Value) bool { // match: (CMPW (MOVDconst [c]) x) // result: (InvertFlags (CMPWconst [int64(int32(c))] x)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) v0.AuxInt = int64(int32(c)) @@ -3062,12 +2988,12 @@ func rewriteValueARM64_OpARM64CMPW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPWconst (MOVDconst [x]) [y]) // cond: int32(x)==int32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3083,7 +3009,6 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3099,7 +3024,6 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3115,7 +3039,6 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3131,7 +3054,6 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3147,7 +3069,6 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVBUreg || !(0xff < int32(c)) { break } @@ -3159,7 +3080,6 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVHUreg || !(0xffff < int32(c)) { break } @@ -3169,12 +3089,12 @@ func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPconst (MOVDconst [x]) [y]) // cond: x==y // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3190,7 +3110,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3206,7 +3125,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3222,7 +3140,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagGT_ULT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3238,7 +3155,6 @@ func 
rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -3254,7 +3170,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVBUreg || !(0xff < c) { break } @@ -3266,7 +3181,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) { break } @@ -3278,7 +3192,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVWUreg || !(0xffffffff < c) { break } @@ -3290,7 +3203,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } @@ -3306,7 +3218,6 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { // result: (FlagLT_ULT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } @@ -3320,17 +3231,18 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMPshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftLL (MOVDconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SLLconst x [d]))) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = c @@ -3345,9 +3257,7 @@ func rewriteValueARM64_OpARM64CMPshiftLL_0(v *Value) bool { // result: (CMPconst x [int64(uint64(c)< x [d]))) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = c @@ -3385,9 +3296,7 @@ func rewriteValueARM64_OpARM64CMPshiftRA_0(v *Value) bool { // result: (CMPconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -3400,17 +3309,18 @@ func rewriteValueARM64_OpARM64CMPshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPshiftRL (MOVDconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SRLconst x [d]))) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = c @@ -3425,9 +3335,7 @@ func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { // result: (CMPconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -3440,16 +3348,18 @@ func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CSEL {cc} x (MOVDconst [0]) flag) // result: (CSEL0 {cc} x flag) for { cc := v.Aux - flag := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } + flag := v_2 v.reset(OpARM64CSEL0) v.Aux = cc v.AddArg(x) @@ -3460,12 
+3370,11 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { // result: (CSEL0 {arm64Negate(cc.(Op))} y flag) for { cc := v.Aux - flag := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst || v_0.AuxInt != 0 { break } - y := v.Args[1] + y := v_1 + flag := v_2 v.reset(OpARM64CSEL0) v.Aux = arm64Negate(cc.(Op)) v.AddArg(y) @@ -3476,10 +3385,8 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { // result: (CSEL {arm64Invert(cc.(Op))} x y cmp) for { cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARM64InvertFlags { break } @@ -3496,8 +3403,8 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { // result: x for { cc := v.Aux - flag := v.Args[2] - x := v.Args[0] + x := v_0 + flag := v_2 if !(ccARM64Eval(cc, flag) > 0) { break } @@ -3511,8 +3418,8 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { // result: y for { cc := v.Aux - flag := v.Args[2] - y := v.Args[1] + y := v_1 + flag := v_2 if !(ccARM64Eval(cc, flag) < 0) { break } @@ -3526,10 +3433,8 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { // result: (CSEL {boolval.Op} x y flagArg(boolval)) for { cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARM64CMPWconst || v_2.AuxInt != 0 { break } @@ -3549,10 +3454,8 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { // result: (CSEL {arm64Negate(boolval.Op)} x y flagArg(boolval)) for { cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpARM64CMPWconst || v_2.AuxInt != 0 { break } @@ -3570,13 +3473,13 @@ func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CSEL0 {cc} x (InvertFlags cmp)) // result: (CSEL0 {arm64Invert(cc.(Op))} x cmp) for { cc := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64InvertFlags { break } @@ -3592,8 +3495,8 @@ func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { // result: x for { cc := v.Aux - flag := v.Args[1] - x := v.Args[0] + x := v_0 + flag := v_1 if !(ccARM64Eval(cc, flag) > 0) { break } @@ -3607,7 +3510,7 @@ func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { // result: (MOVDconst [0]) for { cc := v.Aux - flag := v.Args[1] + flag := v_1 if !(ccARM64Eval(cc, flag) < 0) { break } @@ -3620,9 +3523,7 @@ func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { // result: (CSEL0 {boolval.Op} x flagArg(boolval)) for { cc := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64CMPWconst || v_1.AuxInt != 0 { break } @@ -3641,9 +3542,7 @@ func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { // result: (CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval)) for { cc := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64CMPWconst || v_1.AuxInt != 0 { break } @@ -3660,16 +3559,15 @@ func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64DIV_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (DIV (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c/d]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -3681,16 +3579,15 @@ func rewriteValueARM64_OpARM64DIV_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64DIVW_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (DIVW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [int64(int32(c)/int32(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -3702,12 +3599,12 @@ func rewriteValueARM64_OpARM64DIVW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64EON_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EON x (MOVDconst [c])) // result: (XORconst [^c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -3720,8 +3617,8 @@ func rewriteValueARM64_OpARM64EON_0(v *Value) bool { // match: (EON x x) // result: (MOVDconst [-1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARM64MOVDconst) @@ -3732,9 +3629,8 @@ func rewriteValueARM64_OpARM64EON_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (EONshiftLL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { break } @@ -3753,9 +3649,8 @@ func rewriteValueARM64_OpARM64EON_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (EONshiftRL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { break } @@ -3774,9 +3669,8 @@ func rewriteValueARM64_OpARM64EON_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (EONshiftRA x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { break } @@ -3794,13 +3688,13 @@ func rewriteValueARM64_OpARM64EON_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64EONshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EONshiftLL x (MOVDconst [c]) [d]) // result: (XORconst x [^int64(uint64(c)<>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -3853,9 +3745,7 @@ func rewriteValueARM64_OpARM64EONshiftRA_0(v *Value) bool { // result: (MOVDconst [-1]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRAconst { break } @@ -3870,13 +3760,13 @@ func rewriteValueARM64_OpARM64EONshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64EONshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EONshiftRL x (MOVDconst [c]) [d]) // result: (XORconst x [^int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -3891,9 +3781,7 @@ func rewriteValueARM64_OpARM64EONshiftRL_0(v *Value) bool { // result: (MOVDconst [-1]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRLconst { break } @@ -3908,10 +3796,10 @@ func rewriteValueARM64_OpARM64EONshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { + v_0 := v.Args[0] // match: (Equal (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -3922,7 +3810,6 @@ func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { // match: (Equal (FlagLT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -3933,7 +3820,6 @@ func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { // match: (Equal (FlagLT_UGT)) 
// result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -3944,7 +3830,6 @@ func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { // match: (Equal (FlagGT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -3955,7 +3840,6 @@ func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { // match: (Equal (FlagGT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -3966,7 +3850,6 @@ func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { // match: (Equal (InvertFlags x)) // result: (Equal x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -3978,13 +3861,13 @@ func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FADDD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FADDD a (FMULD x y)) // result: (FMADDD a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARM64FMULD { continue } @@ -4001,10 +3884,8 @@ func rewriteValueARM64_OpARM64FADDD_0(v *Value) bool { // match: (FADDD a (FNMULD x y)) // result: (FMSUBD a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARM64FNMULD { continue } @@ -4021,13 +3902,13 @@ func rewriteValueARM64_OpARM64FADDD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FADDS a (FMULS x y)) // result: (FMADDS a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARM64FMULS { continue } @@ -4044,10 +3925,8 @@ func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool { // match: (FADDS a (FNMULS x y)) // result: (FMSUBS a x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - a := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 if v_1.Op != OpARM64FNMULS { continue } @@ -4064,13 +3943,13 @@ func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FCMPD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (FCMPD x (FMOVDconst [0])) // result: (FCMPD0 x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64FMOVDconst || v_1.AuxInt != 0 { break } @@ -4081,11 +3960,10 @@ func rewriteValueARM64_OpARM64FCMPD_0(v *Value) bool { // match: (FCMPD (FMOVDconst [0]) x) // result: (InvertFlags (FCMPD0 x)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64FMOVDconst || v_0.AuxInt != 0 { break } + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64FCMPD0, types.TypeFlags) v0.AddArg(x) @@ -4095,13 +3973,13 @@ func rewriteValueARM64_OpARM64FCMPD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FCMPS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (FCMPS x (FMOVSconst [0])) // result: (FCMPS0 x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64FMOVSconst || v_1.AuxInt != 0 { break } @@ -4112,11 +3990,10 @@ func rewriteValueARM64_OpARM64FCMPS_0(v *Value) bool { // match: (FCMPS (FMOVSconst [0]) x) // 
result: (InvertFlags (FCMPS0 x)) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64FMOVSconst || v_0.AuxInt != 0 { break } + x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64FCMPS0, types.TypeFlags) v0.AddArg(x) @@ -4126,12 +4003,12 @@ func rewriteValueARM64_OpARM64FCMPS_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVDfpgp_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (FMOVDfpgp (Arg [off] {sym})) // result: @b.Func.Entry (Arg [off] {sym}) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpArg { break } @@ -4148,12 +4025,12 @@ func rewriteValueARM64_OpARM64FMOVDfpgp_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVDgpfp_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (FMOVDgpfp (Arg [off] {sym})) // result: @b.Func.Entry (Arg [off] {sym}) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpArg { break } @@ -4170,6 +4047,8 @@ func rewriteValueARM64_OpARM64FMOVDgpfp_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) @@ -4177,9 +4056,7 @@ func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -4198,13 +4075,12 @@ func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4221,13 +4097,12 @@ func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -4243,14 +4118,13 @@ func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4264,16 +4138,18 @@ func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVDloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDloadidx ptr (MOVDconst [c]) mem) // result: (FMOVDload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64FMOVDload) v.AuxInt = c v.AddArg(ptr) @@ -4283,13 +4159,12 @@ func rewriteValueARM64_OpARM64FMOVDloadidx_0(v *Value) bool { // match: (FMOVDloadidx (MOVDconst [c]) ptr mem) // result: (FMOVDload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64FMOVDload) v.AuxInt = c v.AddArg(ptr) @@ -4299,6 +4174,9 @@ func rewriteValueARM64_OpARM64FMOVDloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := 
v.Block config := b.Func.Config // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) @@ -4306,13 +4184,12 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64FMOVDgpfp { break } val := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = off v.Aux = sym @@ -4327,14 +4204,13 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4352,14 +4228,13 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -4376,15 +4251,14 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4399,17 +4273,20 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVDstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem) // result: (FMOVDstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64FMOVDstore) v.AuxInt = c v.AddArg(ptr) @@ -4420,14 +4297,13 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx_0(v *Value) bool { // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem) // result: (FMOVDstore [c] idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64FMOVDstore) v.AuxInt = c v.AddArg(idx) @@ -4438,6 +4314,8 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) @@ -4445,9 +4323,7 @@ func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -4466,13 +4342,12 @@ func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4489,13 +4364,12 @@ func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx 
:= v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -4511,14 +4385,13 @@ func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4532,16 +4405,18 @@ func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVSloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSloadidx ptr (MOVDconst [c]) mem) // result: (FMOVSload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64FMOVSload) v.AuxInt = c v.AddArg(ptr) @@ -4551,13 +4426,12 @@ func rewriteValueARM64_OpARM64FMOVSloadidx_0(v *Value) bool { // match: (FMOVSloadidx (MOVDconst [c]) ptr mem) // result: (FMOVSload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64FMOVSload) v.AuxInt = c v.AddArg(ptr) @@ -4567,6 +4441,9 @@ func rewriteValueARM64_OpARM64FMOVSloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) @@ -4574,13 +4451,12 @@ func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64FMOVSgpfp { break } val := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym @@ -4595,14 +4471,13 @@ func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4620,14 +4495,13 @@ func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -4644,15 +4518,14 @@ func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -4667,17 +4540,20 @@ func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMOVSstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem) // result: (FMOVSstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 
v.reset(OpARM64FMOVSstore) v.AuxInt = c v.AddArg(ptr) @@ -4688,14 +4564,13 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx_0(v *Value) bool { // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem) // result: (FMOVSstore [c] idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64FMOVSstore) v.AuxInt = c v.AddArg(idx) @@ -4706,17 +4581,17 @@ func rewriteValueARM64_OpARM64FMOVSstoreidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMULD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMULD (FNEGD x) y) // result: (FNMULD x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGD { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARM64FNMULD) v.AddArg(x) v.AddArg(y) @@ -4727,17 +4602,17 @@ func rewriteValueARM64_OpARM64FMULD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FMULS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMULS (FNEGS x) y) // result: (FNMULS x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGS { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARM64FNMULS) v.AddArg(x) v.AddArg(y) @@ -4748,10 +4623,10 @@ func rewriteValueARM64_OpARM64FMULS_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FNEGD_0(v *Value) bool { + v_0 := v.Args[0] // match: (FNEGD (FMULD x y)) // result: (FNMULD x y) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FMULD { break } @@ -4765,7 +4640,6 @@ func rewriteValueARM64_OpARM64FNEGD_0(v *Value) bool { // match: (FNEGD (FNMULD x y)) // result: (FMULD x y) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FNMULD { break } @@ -4779,10 +4653,10 @@ func rewriteValueARM64_OpARM64FNEGD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FNEGS_0(v *Value) bool { + v_0 := v.Args[0] // match: (FNEGS (FMULS x y)) // result: (FNMULS x y) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FMULS { break } @@ -4796,7 +4670,6 @@ func rewriteValueARM64_OpARM64FNEGS_0(v *Value) bool { // match: (FNEGS (FNMULS x y)) // result: (FMULS x y) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FNMULS { break } @@ -4810,17 +4683,17 @@ func rewriteValueARM64_OpARM64FNEGS_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FNMULD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FNMULD (FNEGD x) y) // result: (FMULD x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGD { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARM64FMULD) v.AddArg(x) v.AddArg(y) @@ -4831,17 +4704,17 @@ func rewriteValueARM64_OpARM64FNMULD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FNMULS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FNMULS (FNEGS x) y) // result: (FMULS x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGS { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARM64FMULS) v.AddArg(x) v.AddArg(y) @@ -4852,12 +4725,12 @@ func rewriteValueARM64_OpARM64FNMULS_0(v *Value) 
bool { return false } func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FSUBD a (FMULD x y)) // result: (FMSUBD a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64FMULD { break } @@ -4872,13 +4745,12 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { // match: (FSUBD (FMULD x y) a) // result: (FNMSUBD a x y) for { - a := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64FMULD { break } y := v_0.Args[1] x := v_0.Args[0] + a := v_1 v.reset(OpARM64FNMSUBD) v.AddArg(a) v.AddArg(x) @@ -4888,9 +4760,7 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { // match: (FSUBD a (FNMULD x y)) // result: (FMADDD a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64FNMULD { break } @@ -4905,13 +4775,12 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { // match: (FSUBD (FNMULD x y) a) // result: (FNMADDD a x y) for { - a := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64FNMULD { break } y := v_0.Args[1] x := v_0.Args[0] + a := v_1 v.reset(OpARM64FNMADDD) v.AddArg(a) v.AddArg(x) @@ -4921,12 +4790,12 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FSUBS a (FMULS x y)) // result: (FMSUBS a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64FMULS { break } @@ -4941,13 +4810,12 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { // match: (FSUBS (FMULS x y) a) // result: (FNMSUBS a x y) for { - a := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64FMULS { break } y := v_0.Args[1] x := v_0.Args[0] + a := v_1 v.reset(OpARM64FNMSUBS) v.AddArg(a) v.AddArg(x) @@ -4957,9 +4825,7 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { // match: (FSUBS a (FNMULS x y)) // result: (FMADDS a x y) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64FNMULS { break } @@ -4974,13 +4840,12 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { // match: (FSUBS (FNMULS x y) a) // result: (FNMADDS a x y) for { - a := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64FNMULS { break } y := v_0.Args[1] x := v_0.Args[0] + a := v_1 v.reset(OpARM64FNMADDS) v.AddArg(a) v.AddArg(x) @@ -4990,10 +4855,10 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterEqual (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5004,7 +4869,6 @@ func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagLT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5015,7 +4879,6 @@ func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagLT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5026,7 +4889,6 @@ func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagGT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5037,7 +4899,6 @@ func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagGT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5048,7 +4909,6 @@ func 
rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (InvertFlags x)) // result: (LessEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5060,10 +4920,10 @@ func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64GreaterEqualF_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterEqualF (InvertFlags x)) // result: (LessEqualF x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5075,10 +4935,10 @@ func rewriteValueARM64_OpARM64GreaterEqualF_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterEqualU (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5089,7 +4949,6 @@ func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagLT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5100,7 +4959,6 @@ func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagLT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5111,7 +4969,6 @@ func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagGT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5122,7 +4979,6 @@ func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (FlagGT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5133,7 +4989,6 @@ func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { // match: (GreaterEqualU (InvertFlags x)) // result: (LessEqualU x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5145,10 +5000,10 @@ func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterThan (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5159,7 +5014,6 @@ func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagLT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5170,7 +5024,6 @@ func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagLT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5181,7 +5034,6 @@ func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagGT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5192,7 +5044,6 @@ func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagGT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5203,7 +5054,6 @@ func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { // match: (GreaterThan (InvertFlags x)) // result: (LessThan x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5215,10 +5065,10 @@ func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64GreaterThanF_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterThanF (InvertFlags x)) // result: (LessThanF x) for { - v_0 := v.Args[0] if 
v_0.Op != OpARM64InvertFlags { break } @@ -5230,10 +5080,10 @@ func rewriteValueARM64_OpARM64GreaterThanF_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { + v_0 := v.Args[0] // match: (GreaterThanU (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5244,7 +5094,6 @@ func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagLT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5255,7 +5104,6 @@ func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagLT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5266,7 +5114,6 @@ func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagGT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5277,7 +5124,6 @@ func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { // match: (GreaterThanU (FlagGT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5288,7 +5134,6 @@ func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { // match: (GreaterThanU (InvertFlags x)) // result: (LessThanU x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5300,10 +5145,10 @@ func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessEqual (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5314,7 +5159,6 @@ func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { // match: (LessEqual (FlagLT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5325,7 +5169,6 @@ func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { // match: (LessEqual (FlagLT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5336,7 +5179,6 @@ func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { // match: (LessEqual (FlagGT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5347,7 +5189,6 @@ func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { // match: (LessEqual (FlagGT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5358,7 +5199,6 @@ func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { // match: (LessEqual (InvertFlags x)) // result: (GreaterEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5370,10 +5210,10 @@ func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64LessEqualF_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessEqualF (InvertFlags x)) // result: (GreaterEqualF x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5385,10 +5225,10 @@ func rewriteValueARM64_OpARM64LessEqualF_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessEqualU (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5399,7 +5239,6 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagLT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if 
v_0.Op != OpARM64FlagLT_ULT { break } @@ -5410,7 +5249,6 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagLT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5421,7 +5259,6 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagGT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5432,7 +5269,6 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { // match: (LessEqualU (FlagGT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5443,7 +5279,6 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { // match: (LessEqualU (InvertFlags x)) // result: (GreaterEqualU x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5455,10 +5290,10 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessThan (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5469,7 +5304,6 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { // match: (LessThan (FlagLT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5480,7 +5314,6 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { // match: (LessThan (FlagLT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5491,7 +5324,6 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { // match: (LessThan (FlagGT_ULT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5502,7 +5334,6 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { // match: (LessThan (FlagGT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5513,7 +5344,6 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { // match: (LessThan (InvertFlags x)) // result: (GreaterThan x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5525,10 +5355,10 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64LessThanF_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessThanF (InvertFlags x)) // result: (GreaterThanF x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5540,10 +5370,10 @@ func rewriteValueARM64_OpARM64LessThanF_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { + v_0 := v.Args[0] // match: (LessThanU (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -5554,7 +5384,6 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { // match: (LessThanU (FlagLT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -5565,7 +5394,6 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { // match: (LessThanU (FlagLT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -5576,7 +5404,6 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { // match: (LessThanU (FlagGT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -5587,7 +5414,6 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { // match: (LessThanU 
(FlagGT_UGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -5598,7 +5424,6 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { // match: (LessThanU (InvertFlags x)) // result: (GreaterThanU x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } @@ -5610,14 +5435,15 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MADD a x (MOVDconst [-1])) // result: (SUB a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != -1 { break } @@ -5629,9 +5455,7 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // match: (MADD a _ (MOVDconst [0])) // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_2 := v.Args[2] + a := v_0 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } @@ -5643,10 +5467,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // match: (MADD a x (MOVDconst [1])) // result: (ADD a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 1 { break } @@ -5659,10 +5481,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ADDshiftLL a x [log2(c)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5680,10 +5500,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && c>=3 // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5704,10 +5522,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && c>=7 // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5728,10 +5544,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5753,10 +5567,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5778,10 +5590,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5803,10 +5613,8 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -5827,16 +5635,18 @@ func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { return false } func 
rewriteValueARM64_OpARM64MADD_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MADD a (MOVDconst [-1]) x) // result: (SUB a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { break } + x := v_2 v.reset(OpARM64SUB) v.AddArg(a) v.AddArg(x) @@ -5845,9 +5655,7 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // match: (MADD a (MOVDconst [0]) _) // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } @@ -5859,12 +5667,11 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // match: (MADD a (MOVDconst [1]) x) // result: (ADD a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { break } + x := v_2 v.reset(OpARM64ADD) v.AddArg(a) v.AddArg(x) @@ -5874,13 +5681,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ADDshiftLL a x [log2(c)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c)) { break } @@ -5894,13 +5700,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: isPowerOfTwo(c-1) && c>=3 // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c-1) && c >= 3) { break } @@ -5917,13 +5722,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: isPowerOfTwo(c+1) && c>=7 // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c+1) && c >= 7) { break } @@ -5940,13 +5744,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } @@ -5964,13 +5767,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } @@ -5988,13 +5790,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } @@ -6012,13 +5813,12 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } @@ -6035,17 +5835,19 @@ func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MADD_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MADD (MOVDconst [c]) x y) // result: (ADDconst [c] (MUL x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) @@ -6057,14 +5859,11 @@ func rewriteValueARM64_OpARM64MADD_20(v *Value) bool { // match: (MADD a (MOVDconst [c]) (MOVDconst [d])) // result: (ADDconst [c*d] a) for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v_2 := v.Args[2] if v_2.Op != OpARM64MOVDconst { break } @@ -6077,15 +5876,16 @@ func rewriteValueARM64_OpARM64MADD_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MADDW a x (MOVDconst [c])) // cond: int32(c)==-1 // result: (SUB a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6102,9 +5902,7 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: int32(c)==0 // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_2 := v.Args[2] + a := v_0 if v_2.Op != OpARM64MOVDconst { break } @@ -6121,10 +5919,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: int32(c)==1 // result: (ADD a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6141,10 +5937,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ADDshiftLL a x [log2(c)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6162,10 +5956,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c)>=3 // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6186,10 +5978,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c)>=7 // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6210,10 +6000,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6235,10 +6023,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6260,10 +6046,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6285,10 +6069,8 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { // cond: c%9 
== 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -6309,18 +6091,20 @@ func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MADDW a (MOVDconst [c]) x) // cond: int32(c)==-1 // result: (SUB a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(int32(c) == -1) { break } @@ -6333,9 +6117,7 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: int32(c)==0 // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -6352,13 +6134,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: int32(c)==1 // result: (ADD a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(int32(c) == 1) { break } @@ -6371,13 +6152,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ADDshiftLL a x [log2(c)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c)) { break } @@ -6391,13 +6171,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c)>=3 // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } @@ -6414,13 +6193,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c)>=7 // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } @@ -6437,13 +6215,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } @@ -6461,13 +6238,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } @@ -6485,13 +6261,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } @@ -6509,13 +6284,12 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: 
(ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } @@ -6532,17 +6306,19 @@ func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MADDW_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MADDW (MOVDconst [c]) x y) // result: (ADDconst [c] (MULW x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) @@ -6554,14 +6330,11 @@ func rewriteValueARM64_OpARM64MADDW_20(v *Value) bool { // match: (MADDW a (MOVDconst [c]) (MOVDconst [d])) // result: (ADDconst [int64(int32(c)*int32(d))] a) for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v_2 := v.Args[2] if v_2.Op != OpARM64MOVDconst { break } @@ -6574,14 +6347,14 @@ func rewriteValueARM64_OpARM64MADDW_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MNEG x (MOVDconst [-1])) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { continue } @@ -6595,9 +6368,7 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // match: (MNEG _ (MOVDconst [0])) // result: (MOVDconst [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { continue } @@ -6610,10 +6381,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // match: (MNEG x (MOVDconst [1])) // result: (NEG x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { continue } @@ -6627,10 +6396,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (NEG (SLLconst [log2(c)] x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6651,10 +6418,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && c >= 3 // result: (NEG (ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6676,10 +6441,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && c >= 7 // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6703,10 +6466,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SLLconst 
[log2(c/3)] (SUBshiftLL x x [2])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6730,10 +6491,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6758,10 +6517,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6785,10 +6542,8 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6812,17 +6567,16 @@ func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MNEG_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MNEG (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [-c*d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64MOVDconst { continue } @@ -6836,15 +6590,15 @@ func rewriteValueARM64_OpARM64MNEG_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MNEGW x (MOVDconst [c])) // cond: int32(c)==-1 // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6863,9 +6617,7 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: int32(c)==0 // result: (MOVDconst [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARM64MOVDconst { continue } @@ -6883,10 +6635,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: int32(c)==1 // result: (NEG x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6904,10 +6654,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (NEG (SLLconst [log2(c)] x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6928,10 +6676,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (NEG 
(ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6953,10 +6699,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -6980,10 +6724,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -7007,10 +6749,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -7035,10 +6775,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -7062,10 +6800,8 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -7089,17 +6825,16 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MNEGW_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MNEGW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [-int64(int32(c)*int32(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64MOVDconst { continue } @@ -7113,16 +6848,15 @@ func rewriteValueARM64_OpARM64MNEGW_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOD (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c%d]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -7134,16 +6868,15 @@ func rewriteValueARM64_OpARM64MOD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MODW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MODW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [int64(int32(c)%int32(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != 
OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -7155,6 +6888,8 @@ func rewriteValueARM64_OpARM64MODW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -7163,13 +6898,12 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7186,13 +6920,12 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -7208,14 +6941,13 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7232,9 +6964,7 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVBstorezero { break } @@ -7255,8 +6985,6 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -7267,16 +6995,18 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBUloadidx ptr (MOVDconst [c]) mem) // result: (MOVBUload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVBUload) v.AuxInt = c v.AddArg(ptr) @@ -7286,13 +7016,12 @@ func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool { // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) // result: (MOVBUload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVBUload) v.AuxInt = c v.AddArg(ptr) @@ -7303,10 +7032,8 @@ func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVBstorezeroidx { break } @@ -7323,10 +7050,11 @@ func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBUreg x:(MOVBUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUload { break } @@ -7338,7 +7066,7 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // match: (MOVBUreg x:(MOVBUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != 
OpARM64MOVBUloadidx { break } @@ -7350,7 +7078,7 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // match: (MOVBUreg x:(MOVBUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUreg { break } @@ -7361,7 +7089,6 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // match: (MOVBUreg (ANDconst [c] x)) // result: (ANDconst [c&(1<<8-1)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } @@ -7375,7 +7102,6 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // match: (MOVBUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -7388,7 +7114,7 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // cond: x.Type.IsBoolean() // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if !(x.Type.IsBoolean()) { break } @@ -7400,7 +7126,6 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // cond: isARM64BFMask(sc, 1<<8-1, sc) // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -7418,7 +7143,6 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { // cond: isARM64BFMask(sc, 1<<8-1, 0) // result: (UBFX [armBFAuxInt(sc, 8)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } @@ -7435,6 +7159,8 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -7443,13 +7169,12 @@ func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7466,13 +7191,12 @@ func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -7488,14 +7212,13 @@ func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7512,9 +7235,7 @@ func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVBstorezero { break } @@ -7532,16 +7253,18 @@ func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBloadidx ptr (MOVDconst [c]) mem) // result: (MOVBload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVBload) v.AuxInt = c v.AddArg(ptr) @@ -7551,13 +7274,12 @@ func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { // match: (MOVBloadidx (MOVDconst [c]) ptr mem) // result: (MOVBload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] 
if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVBload) v.AuxInt = c v.AddArg(ptr) @@ -7568,10 +7290,8 @@ func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVBstorezeroidx { break } @@ -7588,10 +7308,11 @@ func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBreg x:(MOVBload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBload { break } @@ -7603,7 +7324,7 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { // match: (MOVBreg x:(MOVBloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBloadidx { break } @@ -7615,7 +7336,7 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { // match: (MOVBreg x:(MOVBreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBreg { break } @@ -7626,7 +7347,6 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { // match: (MOVBreg (MOVDconst [c])) // result: (MOVDconst [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -7639,7 +7359,6 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { // cond: lc < 8 // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -7656,6 +7375,9 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -7664,14 +7386,13 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7689,14 +7410,13 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -7713,15 +7433,14 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -7738,12 +7457,11 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = off v.Aux = sym @@ -7756,13 +7474,12 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := 
v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVBreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym @@ -7776,13 +7493,12 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVBUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym @@ -7796,13 +7512,12 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym @@ -7816,13 +7531,12 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym @@ -7836,13 +7550,12 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym @@ -7856,13 +7569,12 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = off v.Aux = sym @@ -7874,20 +7586,21 @@ func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) // result: (MOVHstore [i-1] {s} ptr0 w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -7912,21 +7625,20 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { continue } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -7951,14 +7663,12 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -7983,21 +7693,20 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - 
ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) { continue } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -8022,14 +7731,12 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -8054,21 +7761,20 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) { continue } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -8093,9 +7799,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { break } @@ -8104,7 +7808,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { break } w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -8129,16 +7833,15 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 { continue } @@ -8147,7 +7850,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { continue } w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -8172,15 +7875,13 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -8206,22 +7907,21 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst { continue } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -8244,6 +7944,9 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { return false } func 
rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) @@ -8251,15 +7954,13 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64UBFX { break } bfc := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -8289,22 +7990,21 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64UBFX { continue } bfc := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -8334,9 +8034,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst { break } @@ -8346,7 +8044,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -8376,16 +8074,15 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst { continue } @@ -8395,7 +8092,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { continue } w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -8425,10 +8122,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + ptr := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { break } @@ -8530,10 +8226,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != 6 || x0.Aux != s { break } @@ -8603,9 +8298,11 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 x5_1 := x5.Args[1] if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] { continue @@ -8638,10 +8335,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { for { i 
:= v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + ptr := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { break } @@ -8695,10 +8391,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s { break } @@ -8720,9 +8415,11 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 x1_1 := x1.Args[1] if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] { continue @@ -8755,10 +8452,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + ptr := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { break } @@ -8824,10 +8520,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s { break } @@ -8853,9 +8548,11 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 x1_1 := x1.Args[1] if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 { continue @@ -8893,6 +8590,9 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) @@ -8900,10 +8600,9 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + ptr := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { break } @@ -8957,10 +8656,9 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s { break } @@ -8982,9 +8680,11 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 x1_1 := x1.Args[1] if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { continue @@ -9017,10 +8717,9 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := 
v.Args[1] - x := v.Args[2] + ptr := v_0 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -9050,17 +8749,17 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := v_0.Args[_i0] - idx1 := v_0.Args[1^_i0] - w := v.Args[1] - x := v.Args[2] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr1 := v_0_0 + idx1 := v_0_1 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -9088,10 +8787,9 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x := v.Args[2] + ptr := v_0 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -9121,17 +8819,17 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := v_0.Args[_i0] - idx1 := v_0.Args[1^_i0] - w := v.Args[1] - x := v.Args[2] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr1 := v_0_0 + idx1 := v_0_1 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -9159,10 +8857,9 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x := v.Args[2] + ptr := v_0 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -9196,17 +8893,17 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := v_0.Args[_i0] - idx1 := v_0.Args[1^_i0] - w := v.Args[1] - x := v.Args[2] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr1 := v_0_0 + idx1 := v_0_1 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -9238,10 +8935,9 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x := v.Args[2] + ptr := v_0 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -9271,17 +8967,17 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := v_0.Args[_i0] - idx1 := v_0.Args[1^_i0] - w := v.Args[1] - x := v.Args[2] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr1 := v_0_0 + idx1 := v_0_1 + w := v_1 + x := v_2 if x.Op != OpARM64MOVBstoreidx { continue } @@ -9306,17 +9002,20 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) // result: (MOVBstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + 
val := v_2 + mem := v_3 v.reset(OpARM64MOVBstore) v.AuxInt = c v.AddArg(ptr) @@ -9327,14 +9026,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) // result: (MOVBstore [c] idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVBstore) v.AuxInt = c v.AddArg(idx) @@ -9345,13 +9043,12 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) // result: (MOVBstorezeroidx ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVBstorezeroidx) v.AddArg(ptr) v.AddArg(idx) @@ -9361,14 +9058,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVBreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -9379,14 +9075,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVBUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -9397,14 +9092,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -9415,14 +9109,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -9433,14 +9126,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -9451,14 +9143,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -9470,19 +9161,16 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx ptr idx w mem) for { - _ = v.Args[3] - ptr := 
v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] - v_2 := v.Args[2] if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 8 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpARM64MOVBstoreidx { break } @@ -9500,20 +9188,22 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVWstoreidx ptr idx (REVW w) mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 3 { break } idx := v_1.Args[0] - w := v.Args[2] - x0 := v.Args[3] + w := v_2 + x0 := v_3 if x0.Op != OpARM64MOVBstoreidx { break } @@ -9570,11 +9260,10 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVWstoreidx ptr idx w mem) for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x0 := v.Args[3] + ptr := v_0 + idx := v_1 + w := v_2 + x0 := v_3 if x0.Op != OpARM64MOVBstoreidx { break } @@ -9633,15 +9322,13 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx ptr idx (REV16W w) mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] - w := v.Args[2] - x := v.Args[3] + w := v_2 + x := v_3 if x.Op != OpARM64MOVBstoreidx { break } @@ -9666,11 +9353,10 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx ptr idx w mem) for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] + ptr := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != OpARM64MOVBstoreidx { break } @@ -9696,6 +9382,8 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) @@ -9704,13 +9392,12 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -9727,14 +9414,13 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -9751,13 +9437,12 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != 
OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -9773,9 +9458,8 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] + ptr0 := v_0 + x := v_1 if x.Op != OpARM64MOVBstorezero { break } @@ -9803,16 +9487,16 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - x := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 + x := v_1 if x.Op != OpARM64MOVBstorezeroidx { continue } @@ -9833,16 +9517,18 @@ func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVBstorezeroidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem) // result: (MOVBstorezero [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = c v.AddArg(ptr) @@ -9852,13 +9538,12 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx_0(v *Value) bool { // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) // result: (MOVBstorezero [c] idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = c v.AddArg(idx) @@ -9869,14 +9554,12 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVHstorezeroidx ptr idx mem) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVBstorezeroidx { break } @@ -9893,6 +9576,8 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) @@ -9900,9 +9585,7 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -9921,13 +9604,12 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -9944,13 +9626,12 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -9966,13 +9647,12 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 3 { break } idx := v_0.Args[1] ptr 
:= v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -9988,14 +9668,13 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10012,9 +9691,7 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDstorezero { break } @@ -10035,8 +9712,6 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -10047,16 +9722,18 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDloadidx ptr (MOVDconst [c]) mem) // result: (MOVDload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c v.AddArg(ptr) @@ -10066,13 +9743,12 @@ func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { // match: (MOVDloadidx (MOVDconst [c]) ptr mem) // result: (MOVDload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c v.AddArg(ptr) @@ -10082,13 +9758,12 @@ func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) // result: (MOVDloadidx8 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 3 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVDloadidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10098,13 +9773,12 @@ func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { // match: (MOVDloadidx (SLLconst [3] idx) ptr mem) // result: (MOVDloadidx8 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 3 { break } idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVDloadidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10115,10 +9789,8 @@ func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDstorezeroidx { break } @@ -10135,16 +9807,18 @@ func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDloadidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem) // result: (MOVDload [c<<3] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVDload) v.AuxInt = c << 3 v.AddArg(ptr) @@ -10155,10 +9829,8 @@ func rewriteValueARM64_OpARM64MOVDloadidx8_0(v *Value) bool { // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: 
(MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDstorezeroidx8 { break } @@ -10175,11 +9847,12 @@ func rewriteValueARM64_OpARM64MOVDloadidx8_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVDreg x) // cond: x.Uses == 1 // result: (MOVDnop x) for { - x := v.Args[0] + x := v_0 if !(x.Uses == 1) { break } @@ -10190,7 +9863,6 @@ func rewriteValueARM64_OpARM64MOVDreg_0(v *Value) bool { // match: (MOVDreg (MOVDconst [c])) // result: (MOVDconst [c]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -10202,6 +9874,9 @@ func rewriteValueARM64_OpARM64MOVDreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) @@ -10209,13 +9884,12 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64FMOVDfpgp { break } val := v_1.Args[0] + mem := v_2 v.reset(OpARM64FMOVDstore) v.AuxInt = off v.Aux = sym @@ -10230,14 +9904,13 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10255,14 +9928,13 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -10279,14 +9951,13 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -10303,15 +9974,14 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10328,12 +9998,11 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = off v.Aux = sym @@ -10344,17 +10013,20 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) // result: (MOVDstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 
v.reset(OpARM64MOVDstore) v.AuxInt = c v.AddArg(ptr) @@ -10365,14 +10037,13 @@ func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { // match: (MOVDstoreidx (MOVDconst [c]) idx val mem) // result: (MOVDstore [c] idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c v.AddArg(idx) @@ -10383,14 +10054,13 @@ func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) // result: (MOVDstoreidx8 ptr idx val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 3 { break } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVDstoreidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10401,14 +10071,13 @@ func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem) // result: (MOVDstoreidx8 ptr idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 3 { break } idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVDstoreidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10419,13 +10088,12 @@ func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) // result: (MOVDstorezeroidx ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVDstorezeroidx) v.AddArg(ptr) v.AddArg(idx) @@ -10435,17 +10103,20 @@ func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDstoreidx8_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem) // result: (MOVDstore [c<<3] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVDstore) v.AuxInt = c << 3 v.AddArg(ptr) @@ -10456,13 +10127,12 @@ func rewriteValueARM64_OpARM64MOVDstoreidx8_0(v *Value) bool { // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) // result: (MOVDstorezeroidx8 ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVDstorezeroidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10472,6 +10142,8 @@ func rewriteValueARM64_OpARM64MOVDstoreidx8_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) @@ -10480,13 +10152,12 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10503,14 +10174,13 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := 
v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10527,13 +10197,12 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -10549,13 +10218,12 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -10571,9 +10239,8 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] + ptr0 := v_0 + x := v_1 if x.Op != OpARM64MOVDstorezero { break } @@ -10601,16 +10268,17 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - p0 := v.Args[0] + p0 := v_0 if p0.Op != OpARM64ADD { break } _ = p0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := p0.Args[_i0] - idx0 := p0.Args[1^_i0] - x := v.Args[1] + p0_0 := p0.Args[0] + p0_1 := p0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p0_0, p0_1 = _i0+1, p0_1, p0_0 { + ptr0 := p0_0 + idx0 := p0_1 + x := v_1 if x.Op != OpARM64MOVDstorezeroidx { continue } @@ -10637,14 +10305,13 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - p0 := v.Args[0] + p0 := v_0 if p0.Op != OpARM64ADDshiftLL || p0.AuxInt != 3 { break } idx0 := p0.Args[1] ptr0 := p0.Args[0] - x := v.Args[1] + x := v_1 if x.Op != OpARM64MOVDstorezeroidx8 { break } @@ -10664,16 +10331,18 @@ func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem) // result: (MOVDstorezero [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c v.AddArg(ptr) @@ -10683,13 +10352,12 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem) // result: (MOVDstorezero [c] idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c v.AddArg(idx) @@ -10699,13 +10367,12 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) // result: (MOVDstorezeroidx8 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 3 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10715,13 +10382,12 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) // result: (MOVDstorezeroidx8 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 3 { break } idx := v_0.Args[0] - ptr := 
v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) v.AddArg(ptr) v.AddArg(idx) @@ -10731,16 +10397,18 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVDstorezeroidx8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) // result: (MOVDstorezero [c<<3] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = c << 3 v.AddArg(ptr) @@ -10750,6 +10418,8 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx8_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -10758,13 +10428,12 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10781,13 +10450,12 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -10803,13 +10471,12 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -10825,14 +10492,13 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -10849,9 +10515,7 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVHstorezero { break } @@ -10872,8 +10536,6 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -10884,16 +10546,18 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHUloadidx ptr (MOVDconst [c]) mem) // result: (MOVHUload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c v.AddArg(ptr) @@ -10903,13 +10567,12 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) // result: (MOVHUload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c v.AddArg(ptr) @@ -10919,13 +10582,12 
@@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) // result: (MOVHUloadidx2 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHUloadidx2) v.AddArg(ptr) v.AddArg(idx) @@ -10935,9 +10597,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { // match: (MOVHUloadidx ptr (ADD idx idx) mem) // result: (MOVHUloadidx2 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADD { break } @@ -10945,6 +10605,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { if idx != v_1.Args[0] { break } + mem := v_2 v.reset(OpARM64MOVHUloadidx2) v.AddArg(ptr) v.AddArg(idx) @@ -10954,8 +10615,6 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { // match: (MOVHUloadidx (ADD idx idx) ptr mem) // result: (MOVHUloadidx2 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } @@ -10963,7 +10622,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { if idx != v_0.Args[0] { break } - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVHUloadidx2) v.AddArg(ptr) v.AddArg(idx) @@ -10974,10 +10634,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx { break } @@ -10994,16 +10652,18 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem) // result: (MOVHUload [c<<1] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVHUload) v.AuxInt = c << 1 v.AddArg(ptr) @@ -11014,10 +10674,8 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx2 { break } @@ -11034,10 +10692,11 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHUreg x:(MOVBUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUload { break } @@ -11049,7 +10708,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUload { break } @@ -11061,7 +10720,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVBUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUloadidx { break } @@ -11073,7 +10732,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUloadidx { break } @@ -11085,7 +10744,7 @@ func 
rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUloadidx2 { break } @@ -11097,7 +10756,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVBUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUreg { break } @@ -11108,7 +10767,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUreg { break } @@ -11119,7 +10778,6 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg (ANDconst [c] x)) // result: (ANDconst [c&(1<<16-1)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } @@ -11133,7 +10791,6 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -11146,7 +10803,6 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { // cond: isARM64BFMask(sc, 1<<16-1, sc) // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -11163,11 +10819,11 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHUreg (SRLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<16-1, 0) // result: (UBFX [armBFAuxInt(sc, 16)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } @@ -11184,6 +10840,8 @@ func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -11192,13 +10850,12 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -11215,13 +10872,12 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -11237,13 +10893,12 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -11259,14 +10914,13 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -11283,9 +10937,7 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVHstorezero { break } @@ -11303,16 +10955,18 @@ func 
rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHloadidx ptr (MOVDconst [c]) mem) // result: (MOVHload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c v.AddArg(ptr) @@ -11322,13 +10976,12 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { // match: (MOVHloadidx (MOVDconst [c]) ptr mem) // result: (MOVHload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c v.AddArg(ptr) @@ -11338,13 +10991,12 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) // result: (MOVHloadidx2 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHloadidx2) v.AddArg(ptr) v.AddArg(idx) @@ -11354,9 +11006,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { // match: (MOVHloadidx ptr (ADD idx idx) mem) // result: (MOVHloadidx2 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADD { break } @@ -11364,6 +11014,7 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { if idx != v_1.Args[0] { break } + mem := v_2 v.reset(OpARM64MOVHloadidx2) v.AddArg(ptr) v.AddArg(idx) @@ -11373,8 +11024,6 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { // match: (MOVHloadidx (ADD idx idx) ptr mem) // result: (MOVHloadidx2 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } @@ -11382,7 +11031,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { if idx != v_0.Args[0] { break } - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVHloadidx2) v.AddArg(ptr) v.AddArg(idx) @@ -11393,10 +11043,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx { break } @@ -11413,16 +11061,18 @@ func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHloadidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem) // result: (MOVHload [c<<1] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVHload) v.AuxInt = c << 1 v.AddArg(ptr) @@ -11433,10 +11083,8 @@ func rewriteValueARM64_OpARM64MOVHloadidx2_0(v *Value) bool { // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx2 { break } @@ -11453,10 +11101,11 @@ func rewriteValueARM64_OpARM64MOVHloadidx2_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { + v_0 := v.Args[0] // match: 
(MOVHreg x:(MOVBload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBload { break } @@ -11468,7 +11117,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUload { break } @@ -11480,7 +11129,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHload { break } @@ -11492,7 +11141,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBloadidx { break } @@ -11504,7 +11153,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUloadidx { break } @@ -11516,7 +11165,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHloadidx { break } @@ -11528,7 +11177,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHloadidx2 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHloadidx2 { break } @@ -11540,7 +11189,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBreg { break } @@ -11551,7 +11200,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUreg { break } @@ -11562,7 +11211,7 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHreg { break } @@ -11573,10 +11222,10 @@ func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHreg (MOVDconst [c])) // result: (MOVDconst [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -11589,7 +11238,6 @@ func rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { // cond: lc < 16 // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -11606,6 +11254,9 @@ func rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) @@ -11614,14 +11265,13 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -11639,14 +11289,13 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && 
sym == nil) { break } @@ -11663,14 +11312,13 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -11687,15 +11335,14 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -11712,12 +11359,11 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = off v.Aux = sym @@ -11730,13 +11376,12 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym @@ -11750,13 +11395,12 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym @@ -11770,13 +11414,12 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym @@ -11790,13 +11433,12 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHstore) v.AuxInt = off v.Aux = sym @@ -11811,14 +11453,12 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -11838,6 +11478,9 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) @@ -11847,21 +11490,20 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = 
_i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { continue } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx { continue } @@ -11888,19 +11530,16 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx2 { break } @@ -11926,14 +11565,12 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -11958,21 +11595,20 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) { continue } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx { continue } @@ -11999,19 +11635,16 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx2 { break } @@ -12037,9 +11670,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { break } @@ -12048,7 +11679,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -12073,16 +11704,15 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { continue } @@ -12091,7 +11721,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { continue } w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx { continue } @@ -12118,14 +11748,11 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 { break } @@ -12134,7 +11761,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } 
w := v_1_0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx2 { break } @@ -12160,15 +11787,13 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -12194,22 +11819,21 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst { continue } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx { continue } @@ -12232,6 +11856,9 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -12241,20 +11868,17 @@ func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstoreidx2 { break } @@ -12278,17 +11902,20 @@ func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) // result: (MOVHstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c v.AddArg(ptr) @@ -12299,14 +11926,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx (MOVDconst [c]) idx val mem) // result: (MOVHstore [c] idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c v.AddArg(idx) @@ -12317,14 +11943,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem) // result: (MOVHstoreidx2 ptr idx val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12335,9 +11960,7 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx ptr (ADD idx idx) val mem) // result: (MOVHstoreidx2 ptr idx val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := 
v_0 if v_1.Op != OpARM64ADD { break } @@ -12345,7 +11968,8 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { if idx != v_1.Args[0] { break } - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12356,14 +11980,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem) // result: (MOVHstoreidx2 ptr idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 1 { break } idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12374,8 +11997,6 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx (ADD idx idx) ptr val mem) // result: (MOVHstoreidx2 ptr idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } @@ -12383,8 +12004,9 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { if idx != v_0.Args[0] { break } - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12395,13 +12017,12 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem) // result: (MOVHstorezeroidx ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVHstorezeroidx) v.AddArg(ptr) v.AddArg(idx) @@ -12411,14 +12032,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) // result: (MOVHstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -12429,14 +12049,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem) // result: (MOVHstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -12447,14 +12066,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) // result: (MOVHstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -12465,17 +12083,20 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstoreidx_10(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem) // result: (MOVHstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -12487,19 +12108,16 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_10(v *Value) bool { // cond: x.Uses == 1 && 
clobber(x) // result: (MOVWstoreidx ptr idx w mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] - v_2 := v.Args[2] if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 16 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpARM64MOVHstoreidx { break } @@ -12517,17 +12135,20 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem) // result: (MOVHstore [c<<1] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVHstore) v.AuxInt = c << 1 v.AddArg(ptr) @@ -12538,13 +12159,12 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) // result: (MOVHstorezeroidx2 ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12554,14 +12174,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem) // result: (MOVHstoreidx2 ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12572,14 +12191,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) // result: (MOVHstoreidx2 ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVHUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12590,14 +12208,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem) // result: (MOVHstoreidx2 ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12608,14 +12225,13 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) // result: (MOVHstoreidx2 ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12626,6 +12242,8 @@ func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) @@ -12634,13 +12252,12 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt 
ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12657,14 +12274,13 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12681,13 +12297,12 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -12703,13 +12318,12 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -12725,9 +12339,8 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] + ptr0 := v_0 + x := v_1 if x.Op != OpARM64MOVHstorezero { break } @@ -12755,16 +12368,16 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - x := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 + x := v_1 if x.Op != OpARM64MOVHstorezeroidx { continue } @@ -12790,14 +12403,12 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - x := v.Args[1] + x := v_1 if x.Op != OpARM64MOVHstorezeroidx2 { break } @@ -12819,16 +12430,18 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem) // result: (MOVHstorezero [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c v.AddArg(ptr) @@ -12838,13 +12451,12 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem) // result: (MOVHstorezero [c] idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c v.AddArg(idx) @@ -12854,13 +12466,12 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem) // result: (MOVHstorezeroidx2 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 1 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12870,9 +12481,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v 
*Value) bool { // match: (MOVHstorezeroidx ptr (ADD idx idx) mem) // result: (MOVHstorezeroidx2 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADD { break } @@ -12880,6 +12489,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { if idx != v_1.Args[0] { break } + mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12889,13 +12499,12 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem) // result: (MOVHstorezeroidx2 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 1 { break } idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12905,8 +12514,6 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { // match: (MOVHstorezeroidx (ADD idx idx) ptr mem) // result: (MOVHstorezeroidx2 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } @@ -12914,7 +12521,8 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { if idx != v_0.Args[0] { break } - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12925,14 +12533,12 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstorezeroidx ptr idx mem) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVHstorezeroidx { break } @@ -12949,16 +12555,18 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVHstorezeroidx2_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) // result: (MOVHstorezero [c<<1] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVHstorezero) v.AuxInt = c << 1 v.AddArg(ptr) @@ -12968,6 +12576,8 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx2_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVQstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) @@ -12976,13 +12586,12 @@ func rewriteValueARM64_OpARM64MOVQstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -12999,14 +12608,13 @@ func rewriteValueARM64_OpARM64MOVQstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13020,6 +12628,8 @@ func rewriteValueARM64_OpARM64MOVQstorezero_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWUload [off] {sym} ptr 
(FMOVSstore [off] {sym} ptr val _)) @@ -13027,9 +12637,7 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64FMOVSstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -13048,13 +12656,12 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13071,13 +12678,12 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -13093,13 +12699,12 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -13115,14 +12720,13 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13139,9 +12743,7 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWstorezero { break } @@ -13162,8 +12764,6 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpSB || !(symIsRO(sym)) { break } @@ -13174,16 +12774,18 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWUloadidx ptr (MOVDconst [c]) mem) // result: (MOVWUload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c v.AddArg(ptr) @@ -13193,13 +12795,12 @@ func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { // match: (MOVWUloadidx (MOVDconst [c]) ptr mem) // result: (MOVWUload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c v.AddArg(ptr) @@ -13209,13 +12810,12 @@ func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem) // result: (MOVWUloadidx4 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVWUloadidx4) v.AddArg(ptr) v.AddArg(idx) @@ -13225,13 +12825,12 @@ func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem) // result: (MOVWUloadidx4 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst 
|| v_0.AuxInt != 2 { break } idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVWUloadidx4) v.AddArg(ptr) v.AddArg(idx) @@ -13242,10 +12841,8 @@ func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWstorezeroidx { break } @@ -13262,16 +12859,18 @@ func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWUloadidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem) // result: (MOVWUload [c<<2] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVWUload) v.AuxInt = c << 2 v.AddArg(ptr) @@ -13282,10 +12881,8 @@ func rewriteValueARM64_OpARM64MOVWUloadidx4_0(v *Value) bool { // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWstorezeroidx4 { break } @@ -13302,10 +12899,11 @@ func rewriteValueARM64_OpARM64MOVWUloadidx4_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWUreg x:(MOVBUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUload { break } @@ -13317,7 +12915,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVHUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUload { break } @@ -13329,7 +12927,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVWUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWUload { break } @@ -13341,7 +12939,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVBUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUloadidx { break } @@ -13353,7 +12951,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVHUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUloadidx { break } @@ -13365,7 +12963,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVWUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWUloadidx { break } @@ -13377,7 +12975,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUloadidx2 { break } @@ -13389,7 +12987,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWUloadidx4 { break } @@ -13401,7 +12999,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVBUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUreg { break } @@ -13412,7 +13010,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVHUreg 
_)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUreg { break } @@ -13423,10 +13021,11 @@ func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWUreg x:(MOVWUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWUreg { break } @@ -13437,7 +13036,6 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { // match: (MOVWUreg (ANDconst [c] x)) // result: (ANDconst [c&(1<<32-1)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } @@ -13451,7 +13049,6 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { // match: (MOVWUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -13464,7 +13061,6 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { // cond: isARM64BFMask(sc, 1<<32-1, sc) // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -13482,7 +13078,6 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { // cond: isARM64BFMask(sc, 1<<32-1, 0) // result: (UBFX [armBFAuxInt(sc, 32)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } @@ -13499,6 +13094,8 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) @@ -13507,13 +13104,12 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13530,13 +13126,12 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -13552,13 +13147,12 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -13574,14 +13168,13 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -13598,9 +13191,7 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWstorezero { break } @@ -13618,16 +13209,18 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx ptr (MOVDconst [c]) mem) // result: (MOVWload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst 
{ break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c v.AddArg(ptr) @@ -13637,13 +13230,12 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx (MOVDconst [c]) ptr mem) // result: (MOVWload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c v.AddArg(ptr) @@ -13653,13 +13245,12 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx ptr (SLLconst [2] idx) mem) // result: (MOVWloadidx4 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVWloadidx4) v.AddArg(ptr) v.AddArg(idx) @@ -13669,13 +13260,12 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { // match: (MOVWloadidx (SLLconst [2] idx) ptr mem) // result: (MOVWloadidx4 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 2 { break } idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVWloadidx4) v.AddArg(ptr) v.AddArg(idx) @@ -13686,10 +13276,8 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWstorezeroidx { break } @@ -13706,16 +13294,18 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWloadidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem) // result: (MOVWload [c<<2] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVWload) v.AuxInt = c << 2 v.AddArg(ptr) @@ -13726,10 +13316,8 @@ func rewriteValueARM64_OpARM64MOVWloadidx4_0(v *Value) bool { // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWstorezeroidx4 { break } @@ -13746,10 +13334,11 @@ func rewriteValueARM64_OpARM64MOVWloadidx4_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg x:(MOVBload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBload { break } @@ -13761,7 +13350,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVBUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUload { break } @@ -13773,7 +13362,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHload { break } @@ -13785,7 +13374,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHUload _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUload { break } @@ -13797,7 +13386,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVWload _ _)) // result: (MOVDreg x) for { - x 
:= v.Args[0] + x := v_0 if x.Op != OpARM64MOVWload { break } @@ -13809,7 +13398,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVBloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBloadidx { break } @@ -13821,7 +13410,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVBUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUloadidx { break } @@ -13833,7 +13422,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHloadidx { break } @@ -13845,7 +13434,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHUloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUloadidx { break } @@ -13857,7 +13446,7 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVWloadidx _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWloadidx { break } @@ -13869,10 +13458,11 @@ func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg x:(MOVHloadidx2 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHloadidx2 { break } @@ -13884,7 +13474,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVHUloadidx2 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHUloadidx2 { break } @@ -13896,7 +13486,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVWloadidx4 _ _ _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWloadidx4 { break } @@ -13908,7 +13498,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVBreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBreg { break } @@ -13919,7 +13509,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVBUreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVBUreg { break } @@ -13930,7 +13520,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVHreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVHreg { break } @@ -13941,7 +13531,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVWreg _)) // result: (MOVDreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64MOVWreg { break } @@ -13952,7 +13542,6 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // match: (MOVWreg (MOVDconst [c])) // result: (MOVDconst [int64(int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -13965,7 +13554,6 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { // cond: lc < 32 // result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x) for { - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -13982,6 +13570,9 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) @@ -13989,13 +13580,12 @@ func 
rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64FMOVSfpgp { break } val := v_1.Args[0] + mem := v_2 v.reset(OpARM64FMOVSstore) v.AuxInt = off v.Aux = sym @@ -14010,14 +13600,13 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -14035,14 +13624,13 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -14059,14 +13647,13 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil) { break } @@ -14083,15 +13670,14 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -14108,12 +13694,11 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = off v.Aux = sym @@ -14126,13 +13711,12 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym @@ -14146,13 +13730,12 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVWUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = off v.Aux = sym @@ -14167,14 +13750,12 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -14199,21 +13780,20 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 { continue } w := v_1.Args[0] - x := v.Args[2] + x := 
v_2 if x.Op != OpARM64MOVWstoreidx { continue } @@ -14235,6 +13815,9 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) @@ -14244,19 +13827,16 @@ func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVWstoreidx4 { break } @@ -14282,15 +13862,13 @@ func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] + ptr0 := v_0 if v_1.Op != OpARM64SRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -14316,22 +13894,21 @@ func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 if v_1.Op != OpARM64SRLconst { continue } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVWstoreidx { continue } @@ -14359,20 +13936,17 @@ func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { break } s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SRLconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVWstoreidx4 { break } @@ -14396,17 +13970,20 @@ func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) // result: (MOVWstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c v.AddArg(ptr) @@ -14417,14 +13994,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx (MOVDconst [c]) idx val mem) // result: (MOVWstore [c] idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c v.AddArg(idx) @@ -14435,14 +14011,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem) // result: (MOVWstoreidx4 ptr idx val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] - val := 
v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVWstoreidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14453,14 +14028,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem) // result: (MOVWstoreidx4 ptr idx val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 2 { break } idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 v.reset(OpARM64MOVWstoreidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14471,13 +14045,12 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem) // result: (MOVWstorezeroidx ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVWstorezeroidx) v.AddArg(ptr) v.AddArg(idx) @@ -14487,14 +14060,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr idx (MOVWreg x) mem) // result: (MOVWstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVWstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -14505,14 +14077,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem) // result: (MOVWstoreidx ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVWstoreidx) v.AddArg(ptr) v.AddArg(idx) @@ -14524,19 +14095,16 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDstoreidx ptr idx w mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 { break } idx := v_1.Args[0] - v_2 := v.Args[2] if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 32 { break } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpARM64MOVWstoreidx { break } @@ -14554,17 +14122,20 @@ func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem) // result: (MOVWstore [c<<2] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 v.reset(OpARM64MOVWstore) v.AuxInt = c << 2 v.AddArg(ptr) @@ -14575,13 +14146,12 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) // result: (MOVWstorezeroidx4 ptr idx mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } + mem := v_3 v.reset(OpARM64MOVWstorezeroidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14591,14 +14161,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem) // result: (MOVWstoreidx4 ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if 
v_2.Op != OpARM64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVWstoreidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14609,14 +14178,13 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) // result: (MOVWstoreidx4 ptr idx x mem) for { - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpARM64MOVWUreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpARM64MOVWstoreidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14627,6 +14195,8 @@ func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) @@ -14635,13 +14205,12 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -14658,14 +14227,13 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } @@ -14682,13 +14250,12 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -14704,13 +14271,12 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(off == 0 && sym == nil) { break } @@ -14726,9 +14292,8 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] + ptr0 := v_0 + x := v_1 if x.Op != OpARM64MOVWstorezero { break } @@ -14756,16 +14321,16 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr0 := v_0.Args[_i0] - idx0 := v_0.Args[1^_i0] - x := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr0 := v_0_0 + idx0 := v_0_1 + x := v_1 if x.Op != OpARM64MOVWstorezeroidx { continue } @@ -14791,14 +14356,12 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { break } s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 { break } idx0 := v_0.Args[1] ptr0 := v_0.Args[0] - x := v.Args[1] + x := v_1 if x.Op != OpARM64MOVWstorezeroidx4 { break } @@ -14820,16 +14383,18 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem) // result: (MOVWstorezero [c] ptr mem) for { - mem := 
v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c v.AddArg(ptr) @@ -14839,13 +14404,12 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem) // result: (MOVWstorezero [c] idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] + idx := v_1 + mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c v.AddArg(idx) @@ -14855,13 +14419,12 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem) // result: (MOVWstorezeroidx4 ptr idx mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64SLLconst || v_1.AuxInt != 2 { break } idx := v_1.Args[0] + mem := v_2 v.reset(OpARM64MOVWstorezeroidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14871,13 +14434,12 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem) // result: (MOVWstorezeroidx4 ptr idx mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 2 { break } idx := v_0.Args[0] - ptr := v.Args[1] + ptr := v_1 + mem := v_2 v.reset(OpARM64MOVWstorezeroidx4) v.AddArg(ptr) v.AddArg(idx) @@ -14888,14 +14450,12 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVDstorezeroidx ptr idx mem) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 { break } idx := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpARM64MOVWstorezeroidx { break } @@ -14912,16 +14472,18 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) // result: (MOVWstorezero [c<<2] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + mem := v_2 v.reset(OpARM64MOVWstorezero) v.AuxInt = c << 2 v.AddArg(ptr) @@ -14931,14 +14493,15 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MSUB a x (MOVDconst [-1])) // result: (ADD a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != -1 { break } @@ -14950,9 +14513,7 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // match: (MSUB a _ (MOVDconst [0])) // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_2 := v.Args[2] + a := v_0 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { break } @@ -14964,10 +14525,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // match: (MSUB a x (MOVDconst [1])) // result: (SUB a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 1 { break } @@ -14980,10 +14539,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SUBshiftLL a x [log2(c)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a 
:= v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15001,10 +14558,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && c>=3 // result: (SUB a (ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15025,10 +14580,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && c>=7 // result: (ADD a (SUBshiftLL x x [log2(c+1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15049,10 +14602,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15074,10 +14625,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15099,10 +14648,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15124,10 +14671,8 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15148,16 +14693,18 @@ func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MSUB a (MOVDconst [-1]) x) // result: (ADD a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { break } + x := v_2 v.reset(OpARM64ADD) v.AddArg(a) v.AddArg(x) @@ -15166,9 +14713,7 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // match: (MSUB a (MOVDconst [0]) _) // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { break } @@ -15180,12 +14725,11 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // match: (MSUB a (MOVDconst [1]) x) // result: (SUB a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { break } + x := v_2 v.reset(OpARM64SUB) v.AddArg(a) v.AddArg(x) @@ -15195,13 +14739,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SUBshiftLL a x [log2(c)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c)) { break } @@ -15215,13 +14758,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: isPowerOfTwo(c-1) && c>=3 // result: (SUB a (ADDshiftLL x x [log2(c-1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := 
v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c-1) && c >= 3) { break } @@ -15238,13 +14780,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: isPowerOfTwo(c+1) && c>=7 // result: (ADD a (SUBshiftLL x x [log2(c+1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c+1) && c >= 7) { break } @@ -15261,13 +14802,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } @@ -15285,13 +14825,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } @@ -15309,13 +14848,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } @@ -15333,13 +14871,12 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } @@ -15356,17 +14893,19 @@ func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MSUB_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MSUB (MOVDconst [c]) x y) // result: (ADDconst [c] (MNEG x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type) @@ -15378,14 +14917,11 @@ func rewriteValueARM64_OpARM64MSUB_20(v *Value) bool { // match: (MSUB a (MOVDconst [c]) (MOVDconst [d])) // result: (SUBconst [c*d] a) for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v_2 := v.Args[2] if v_2.Op != OpARM64MOVDconst { break } @@ -15398,15 +14934,16 @@ func rewriteValueARM64_OpARM64MSUB_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MSUBW a x (MOVDconst [c])) // cond: int32(c)==-1 // result: (ADD a x) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15423,9 +14960,7 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: int32(c)==0 // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_2 := v.Args[2] + a := v_0 if v_2.Op != OpARM64MOVDconst { break } @@ -15442,10 +14977,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: int32(c)==1 // result: (SUB a x) for { - _ = v.Args[2] - a := v.Args[0] - x := 
v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15462,10 +14995,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SUBshiftLL a x [log2(c)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15483,10 +15014,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c)>=3 // result: (SUB a (ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15507,10 +15036,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c)>=7 // result: (ADD a (SUBshiftLL x x [log2(c+1)])) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15531,10 +15058,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15556,10 +15081,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15581,10 +15104,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15606,10 +15127,8 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - _ = v.Args[2] - a := v.Args[0] - x := v.Args[1] - v_2 := v.Args[2] + a := v_0 + x := v_1 if v_2.Op != OpARM64MOVDconst { break } @@ -15630,18 +15149,20 @@ func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MSUBW a (MOVDconst [c]) x) // cond: int32(c)==-1 // result: (ADD a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(int32(c) == -1) { break } @@ -15654,9 +15175,7 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: int32(c)==0 // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -15673,13 +15192,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: int32(c)==1 // result: (SUB a x) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(int32(c) == 1) { break } @@ -15692,13 +15210,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SUBshiftLL a x [log2(c)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := 
v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c)) { break } @@ -15712,13 +15229,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c)>=3 // result: (SUB a (ADDshiftLL x x [log2(c-1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } @@ -15735,13 +15251,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c)>=7 // result: (ADD a (SUBshiftLL x x [log2(c+1)])) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } @@ -15758,13 +15273,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } @@ -15782,13 +15296,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } @@ -15806,13 +15319,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } @@ -15830,13 +15342,12 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - x := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt + x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } @@ -15853,17 +15364,19 @@ func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MSUBW_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MSUBW (MOVDconst [c]) x y) // result: (ADDconst [c] (MNEGW x y)) for { - y := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - x := v.Args[1] + x := v_1 + y := v_2 v.reset(OpARM64ADDconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type) @@ -15875,14 +15388,11 @@ func rewriteValueARM64_OpARM64MSUBW_20(v *Value) bool { // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d])) // result: (SUBconst [int64(int32(c)*int32(d))] a) for { - _ = v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v_2 := v.Args[2] if v_2.Op != OpARM64MOVDconst { break } @@ -15895,18 +15405,18 @@ func rewriteValueARM64_OpARM64MSUBW_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MUL (NEG x) y) // result: (MNEG x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] 
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64NEG { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARM64MNEG) v.AddArg(x) v.AddArg(y) @@ -15917,10 +15427,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // match: (MUL x (MOVDconst [-1])) // result: (NEG x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { continue } @@ -15933,9 +15441,7 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // match: (MUL _ (MOVDconst [0])) // result: (MOVDconst [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { continue } @@ -15948,10 +15454,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // match: (MUL x (MOVDconst [1])) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { continue } @@ -15966,10 +15470,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -15988,10 +15490,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && c >= 3 // result: (ADDshiftLL x x [log2(c-1)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16011,10 +15511,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && c >= 7 // result: (ADDshiftLL (NEG x) x [log2(c+1)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16036,10 +15534,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16062,10 +15558,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16088,10 +15582,8 @@ func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16115,15 +15607,15 @@ func 
rewriteValueARM64_OpARM64MUL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MUL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MUL x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16145,14 +15637,11 @@ func rewriteValueARM64_OpARM64MUL_10(v *Value) bool { // match: (MUL (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c*d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64MOVDconst { continue } @@ -16166,18 +15655,18 @@ func rewriteValueARM64_OpARM64MUL_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULW (NEG x) y) // result: (MNEGW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64NEG { continue } x := v_0.Args[0] - y := v.Args[1^_i0] + y := v_1 v.reset(OpARM64MNEGW) v.AddArg(x) v.AddArg(y) @@ -16189,10 +15678,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: int32(c)==-1 // result: (NEG x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16210,9 +15697,7 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: int32(c)==0 // result: (MOVDconst [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARM64MOVDconst { continue } @@ -16230,10 +15715,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: int32(c)==1 // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16252,10 +15735,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16274,10 +15755,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (ADDshiftLL x x [log2(c-1)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16297,10 +15776,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (ADDshiftLL (NEG x) x [log2(c+1)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16322,10 +15799,8 @@ func 
rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16348,10 +15823,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16374,10 +15847,8 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16401,15 +15872,15 @@ func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MULW_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULW x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16431,14 +15902,11 @@ func rewriteValueARM64_OpARM64MULW_10(v *Value) bool { // match: (MULW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [int64(int32(c)*int32(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64MOVDconst { continue } @@ -16452,10 +15920,10 @@ func rewriteValueARM64_OpARM64MULW_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { + v_0 := v.Args[0] // match: (MVN (MOVDconst [c])) // result: (MOVDconst [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -16468,7 +15936,7 @@ func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { // cond: clobberIfDead(x) // result: (MVNshiftLL [c] y) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64SLLconst { break } @@ -16486,7 +15954,7 @@ func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { // cond: clobberIfDead(x) // result: (MVNshiftRL [c] y) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64SRLconst { break } @@ -16504,7 +15972,7 @@ func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { // cond: clobberIfDead(x) // result: (MVNshiftRA [c] y) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64SRAconst { break } @@ -16521,11 +15989,11 @@ func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64MVNshiftLL_0(v *Value) bool { + v_0 := v.Args[0] // match: (MVNshiftLL (MOVDconst [c]) [d]) // result: (MOVDconst [^int64(uint64(c)<>uint64(d))]) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -16553,11 +16021,11 @@ func rewriteValueARM64_OpARM64MVNshiftRA_0(v *Value) bool { return false } func 
rewriteValueARM64_OpARM64MVNshiftRL_0(v *Value) bool { + v_0 := v.Args[0] // match: (MVNshiftRL (MOVDconst [c]) [d]) // result: (MOVDconst [^int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -16569,10 +16037,10 @@ func rewriteValueARM64_OpARM64MVNshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEG (MUL x y)) // result: (MNEG x y) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MUL { break } @@ -16586,7 +16054,6 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { // match: (NEG (MULW x y)) // result: (MNEGW x y) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MULW { break } @@ -16600,7 +16067,6 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { // match: (NEG (MOVDconst [c])) // result: (MOVDconst [-c]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -16613,7 +16079,7 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { // cond: clobberIfDead(x) // result: (NEGshiftLL [c] y) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64SLLconst { break } @@ -16631,7 +16097,7 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { // cond: clobberIfDead(x) // result: (NEGshiftRL [c] y) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64SRLconst { break } @@ -16649,7 +16115,7 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { // cond: clobberIfDead(x) // result: (NEGshiftRA [c] y) for { - x := v.Args[0] + x := v_0 if x.Op != OpARM64SRAconst { break } @@ -16666,11 +16132,11 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64NEGshiftLL_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGshiftLL (MOVDconst [c]) [d]) // result: (MOVDconst [-int64(uint64(c)<>uint64(d))]) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -16698,11 +16164,11 @@ func rewriteValueARM64_OpARM64NEGshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64NEGshiftRL_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGshiftRL (MOVDconst [c]) [d]) // result: (MOVDconst [-int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -16714,10 +16180,10 @@ func rewriteValueARM64_OpARM64NEGshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { + v_0 := v.Args[0] // match: (NotEqual (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } @@ -16728,7 +16194,6 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { // match: (NotEqual (FlagLT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } @@ -16739,7 +16204,6 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { // match: (NotEqual (FlagLT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_UGT { break } @@ -16750,7 +16214,6 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { // match: (NotEqual (FlagGT_ULT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_ULT { break } @@ -16761,7 +16224,6 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { // match: (NotEqual (FlagGT_UGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } @@ -16772,7 +16234,6 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { // match: (NotEqual (InvertFlags x)) // result: (NotEqual x) for { - v_0 := v.Args[0] if v_0.Op != 
OpARM64InvertFlags { break } @@ -16784,15 +16245,15 @@ func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64OR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OR x (MOVDconst [c])) // result: (ORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -16807,8 +16268,8 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // match: (OR x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -16819,10 +16280,8 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // match: (OR x (MVN y)) // result: (ORN x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MVN { continue } @@ -16838,10 +16297,9 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ORshiftLL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { continue } @@ -16862,10 +16320,9 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ORshiftRL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { continue } @@ -16886,10 +16343,9 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ORshiftRA x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { continue } @@ -16910,9 +16366,7 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x (NEG y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SLL { continue } @@ -16927,7 +16381,6 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } @@ -16984,9 +16437,7 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { continue } @@ -17001,7 +16452,6 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } @@ -17056,9 +16506,7 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x (NEG y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SLL { continue } @@ -17073,7 +16521,6 @@ func rewriteValueARM64_OpARM64OR_0(v 
*Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } @@ -17131,9 +16578,7 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { continue } @@ -17152,7 +16597,6 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } @@ -17206,20 +16650,19 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64OR_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) // cond: ac == ^((1< {s} (OffPtr [i0] p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -17330,7 +16769,7 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { if p != x2.Args[0] || mem != x2.Args[1] { continue } - y3 := v.Args[1^_i0] + y3 := v_1 if y3.Op != OpARM64MOVDnop { continue } @@ -17365,9 +16804,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -17418,13 +16856,15 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { continue } _ = p1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - ptr1 := p1.Args[_i1] - idx1 := p1.Args[1^_i1] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x2.Args[1] { continue } - y3 := v.Args[1^_i0] + y3 := v_1 if y3.Op != OpARM64MOVDnop { continue } @@ -17455,9 +16895,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr idx mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -17518,7 +16957,7 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] { continue } - y3 := v.Args[1^_i0] + y3 := v_1 if y3.Op != OpARM64MOVDnop { continue } @@ -17546,9 +16985,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} (OffPtr [i0] p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -17690,7 +17128,7 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { if p != x6.Args[0] || mem != x6.Args[1] { continue } - y7 := v.Args[1^_i0] + y7 := v_1 if y7.Op != OpARM64MOVDnop { continue } @@ -17725,9 +17163,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) for { t := v.Type - 
_ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -17846,13 +17283,15 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { continue } _ = p1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - ptr1 := p1.Args[_i1] - idx1 := p1.Args[1^_i1] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x6.Args[1] { continue } - y7 := v.Args[1^_i0] + y7 := v_1 if y7.Op != OpARM64MOVDnop { continue } @@ -17883,9 +17322,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr idx mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18030,7 +17468,7 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] { continue } - y7 := v.Args[1^_i0] + y7 := v_1 if y7.Op != OpARM64MOVDnop { continue } @@ -18058,9 +17496,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18118,7 +17555,7 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { if p != x2.Args[0] || mem != x2.Args[1] { continue } - y3 := v.Args[1^_i0] + y3 := v_1 if y3.Op != OpARM64MOVDnop { continue } @@ -18155,9 +17592,8 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18197,9 +17633,11 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { continue } _ = p1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - ptr1 := p1.Args[_i1] - idx1 := p1.Args[1^_i1] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x1.Args[1] { continue } @@ -18216,7 +17654,7 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { if mem != x2.Args[1] { continue } - y3 := v.Args[1^_i0] + y3 := v_1 if y3.Op != OpARM64MOVDnop { continue } @@ -18245,15 +17683,16 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64OR_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && 
clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr idx mem)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18310,7 +17749,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] { continue } - y3 := v.Args[1^_i0] + y3 := v_1 if y3.Op != OpARM64MOVDnop { continue } @@ -18344,9 +17783,8 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18488,7 +17926,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if p != x6.Args[0] || mem != x6.Args[1] { continue } - y7 := v.Args[1^_i0] + y7 := v_1 if y7.Op != OpARM64MOVDnop { continue } @@ -18525,9 +17963,8 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18587,9 +18024,11 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { continue } _ = p1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - ptr1 := p1.Args[_i1] - idx1 := p1.Args[1^_i1] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x1.Args[1] { continue } @@ -18654,7 +18093,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if p != x6.Args[0] || mem != x6.Args[1] { continue } - y7 := v.Args[1^_i0] + y7 := v_1 if y7.Op != OpARM64MOVDnop { continue } @@ -18685,9 +18124,8 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr idx mem)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - o0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 { continue } @@ -18828,7 +18266,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] { continue } - y7 := v.Args[1^_i0] + y7 := v_1 if y7.Op != OpARM64MOVDnop { continue } @@ -18860,12 +18298,12 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ORN x (MOVDconst [c])) // result: (ORconst [^c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -18878,8 +18316,8 @@ func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { // match: (ORN x x) // result: (MOVDconst [-1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARM64MOVDconst) @@ -18890,9 +18328,8 @@ func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ORNshiftLL x0 y [c]) for { - _ = v.Args[1] - x0 := 
v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { break } @@ -18911,9 +18348,8 @@ func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ORNshiftRL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { break } @@ -18932,9 +18368,8 @@ func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (ORNshiftRA x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { break } @@ -18952,13 +18387,13 @@ func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORNshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ORNshiftLL x (MOVDconst [c]) [d]) // result: (ORconst x [^int64(uint64(c)<>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -19011,9 +18444,7 @@ func rewriteValueARM64_OpARM64ORNshiftRA_0(v *Value) bool { // result: (MOVDconst [-1]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRAconst { break } @@ -19028,13 +18459,13 @@ func rewriteValueARM64_OpARM64ORNshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORNshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ORNshiftRL x (MOVDconst [c]) [d]) // result: (ORconst x [^int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -19049,9 +18480,7 @@ func rewriteValueARM64_OpARM64ORNshiftRL_0(v *Value) bool { // result: (MOVDconst [-1]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRLconst { break } @@ -19066,13 +18495,14 @@ func rewriteValueARM64_OpARM64ORNshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -19092,7 +18522,6 @@ func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool { // result: (MOVDconst [c|d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -19105,7 +18534,6 @@ func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool { // result: (ORconst [c|d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64ORconst { break } @@ -19121,7 +18549,6 @@ func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool { // result: (ORconst [c1] x) for { c1 := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } @@ -19138,18 +18565,19 @@ func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORshiftLL (MOVDconst [c]) x [d]) // result: (ORconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) @@ -19162,9 +18590,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { // result: (ORconst x [int64(uint64(c)< [8] (UBFX [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 
{ + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { break } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) || x != v_0.Args[0] { + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARM64REV16W) @@ -19247,12 +18672,11 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { // result: (EXTRconst [64-c] x2 x) for { c := v.AuxInt - x2 := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c { break } x := v_0.Args[0] + x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c v.AddArg(x2) @@ -19265,13 +18689,12 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { for { t := v.Type c := v.AuxInt - x2 := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64UBFX { break } bfc := v_0.AuxInt x := v_0.Args[0] + x2 := v_1 if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } @@ -19286,14 +18709,11 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { // result: (BFXIL [bfc] y x) for { sc := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64UBFX { break } bfc := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SRLconst || v_1.AuxInt != sc { break } @@ -19315,8 +18735,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { if v.AuxInt != 8 { break } - _ = v.Args[1] - y0 := v.Args[0] + y0 := v_0 if y0.Op != OpARM64MOVDnop { break } @@ -19328,7 +18747,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - y1 := v.Args[1] + y1 := v_1 if y1.Op != OpARM64MOVDnop { break } @@ -19359,6 +18778,8 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) @@ -19368,8 +18789,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 8 { break } - _ = v.Args[1] - y0 := v.Args[0] + y0 := v_0 if y0.Op != OpARM64MOVDnop { break } @@ -19380,7 +18800,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { mem := x0.Args[2] ptr0 := x0.Args[0] idx0 := x0.Args[1] - y1 := v.Args[1] + y1 := v_1 if y1.Op != OpARM64MOVDnop { break } @@ -19395,9 +18815,11 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x1.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { continue } @@ -19420,8 +18842,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 8 { break } - _ = v.Args[1] - y0 := v.Args[0] + y0 := v_0 if y0.Op != OpARM64MOVDnop { break } @@ -19432,7 +18853,7 
@@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { mem := x0.Args[2] ptr := x0.Args[0] idx := x0.Args[1] - y1 := v.Args[1] + y1 := v_1 if y1.Op != OpARM64MOVDnop { break } @@ -19465,8 +18886,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -19495,7 +18915,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { break } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { break } @@ -19531,8 +18951,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -19559,13 +18978,15 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x1.Args[1] { continue } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { continue } @@ -19597,8 +19018,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -19626,7 +19046,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] { break } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { break } @@ -19659,8 +19079,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -19691,7 +19110,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x1.Args[1] { break } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { break } @@ -19724,8 +19143,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -19796,7 +19214,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p != x3.Args[0] || mem != x3.Args[1] { break } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { break } @@ -19832,8 +19250,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -19870,9 +19287,11 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x1.Args[1] { continue } @@ -19901,7 +19320,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p != x3.Args[0] || mem != x3.Args[1] { continue } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { continue } @@ -19932,8 +19351,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -19999,7 +19417,7 
@@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if p != x3.Args[0] || mem != x3.Args[1] { break } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { break } @@ -20031,8 +19449,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -20102,7 +19519,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] { break } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { break } @@ -20130,6 +19547,8 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) @@ -20139,8 +19558,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 8 { break } - _ = v.Args[1] - y0 := v.Args[0] + y0 := v_0 if y0.Op != OpARM64MOVDnop { break } @@ -20152,7 +19570,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - y1 := v.Args[1] + y1 := v_1 if y1.Op != OpARM64MOVDnop { break } @@ -20188,8 +19606,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 8 { break } - _ = v.Args[1] - y0 := v.Args[0] + y0 := v_0 if y0.Op != OpARM64MOVDnop { break } @@ -20204,10 +19621,12 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] - y1 := v.Args[1] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 + y1 := v_1 if y1.Op != OpARM64MOVDnop { continue } @@ -20242,8 +19661,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 8 { break } - _ = v.Args[1] - y0 := v.Args[0] + y0 := v_0 if y0.Op != OpARM64MOVDnop { break } @@ -20258,7 +19676,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { break } idx := x0_1.Args[0] - y1 := v.Args[1] + y1 := v_1 if y1.Op != OpARM64MOVDnop { break } @@ -20289,8 +19707,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -20323,7 +19740,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { break } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { break } @@ -20361,8 +19778,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -20392,13 +19808,15 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x1.Args[1] { continue } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { 
continue } @@ -20433,8 +19851,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 24 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 { break } @@ -20470,7 +19887,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] { break } - y2 := v.Args[1] + y2 := v_1 if y2.Op != OpARM64MOVDnop { break } @@ -20501,8 +19918,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -20577,7 +19993,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if p != x3.Args[0] || mem != x3.Args[1] { break } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { break } @@ -20615,8 +20031,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -20680,13 +20095,15 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { break } _ = p1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr1 := p1.Args[_i0] - idx1 := p1.Args[1^_i0] + p1_0 := p1.Args[0] + p1_1 := p1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 { + ptr1 := p1_0 + idx1 := p1_1 if mem != x3.Args[1] { continue } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { continue } @@ -20721,8 +20138,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if v.AuxInt != 56 { break } - _ = v.Args[1] - o0 := v.Args[0] + o0 := v_0 if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 { break } @@ -20800,7 +20216,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] { break } - y4 := v.Args[1] + y4 := v_1 if y4.Op != OpARM64MOVDnop { break } @@ -20826,17 +20242,18 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftRA (MOVDconst [c]) x [d]) // result: (ORconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) @@ -20849,9 +20266,7 @@ func rewriteValueARM64_OpARM64ORshiftRA_0(v *Value) bool { // result: (ORconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -20866,9 +20281,8 @@ func rewriteValueARM64_OpARM64ORshiftRA_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARM64SRAconst { break } @@ -20884,17 +20298,18 @@ func rewriteValueARM64_OpARM64ORshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORshiftRL (MOVDconst [c]) x [d]) // result: (ORconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64ORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) @@ -20907,9 +20322,7 @@ func 
rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { // result: (ORconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -20924,9 +20337,8 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { // result: y for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 if y.Op != OpARM64SRLconst { break } @@ -20943,9 +20355,11 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { // result: (RORconst [ c] x) for { c := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c || x != v_0.Args[0] { + if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c { + break + } + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARM64RORconst) @@ -20959,13 +20373,10 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { for { t := v.Type c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c { break } x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) { break } @@ -20979,14 +20390,11 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { // result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y) for { rc := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } ac := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SLLconst { break } @@ -21006,14 +20414,11 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { // result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x) for { rc := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64ANDconst { break } ac := v_0.AuxInt y := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64SLLconst { break } @@ -21031,11 +20436,11 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64RORWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (RORWconst [c] (RORWconst [d] x)) // result: (RORWconst [(c+d)&31] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64RORWconst { break } @@ -21049,11 +20454,11 @@ func rewriteValueARM64_OpARM64RORWconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64RORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (RORconst [c] (RORconst [d] x)) // result: (RORconst [(c+d)&63] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64RORconst { break } @@ -21067,15 +20472,16 @@ func rewriteValueARM64_OpARM64RORconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SBCSflags_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SBCSflags x y (Select1 (NEGSflags (NEG (NGCzerocarry bo))))) // result: (SBCSflags x y bo) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { break } @@ -21101,10 +20507,8 @@ func rewriteValueARM64_OpARM64SBCSflags_0(v *Value) bool { // match: (SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) // result: (SUBSflags x y) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { break } @@ -21124,12 +20528,12 @@ func rewriteValueARM64_OpARM64SBCSflags_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SLL x 
(MOVDconst [c])) // result: (SLLconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -21142,11 +20546,11 @@ func rewriteValueARM64_OpARM64SLL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SLLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SLLconst [c] (MOVDconst [d])) // result: (MOVDconst [d<>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -21304,7 +20702,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) for { rc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -21323,7 +20720,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x) for { rc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -21342,7 +20738,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFX [armBFAuxInt(rc, 32-rc)] x) for { rc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVWreg { break } @@ -21360,7 +20755,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFX [armBFAuxInt(rc, 16-rc)] x) for { rc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVHreg { break } @@ -21378,7 +20772,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFX [armBFAuxInt(rc, 8-rc)] x) for { rc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVBreg { break } @@ -21396,7 +20789,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) for { sc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SBFIZ { break } @@ -21415,7 +20807,6 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { // result: (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) for { sc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SBFIZ { break } @@ -21432,12 +20823,12 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SRL x (MOVDconst [c])) // result: (SRLconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -21450,11 +20841,11 @@ func rewriteValueARM64_OpARM64SRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRLconst [c] (MOVDconst [d])) // result: (MOVDconst [int64(uint64(d)>>uint64(c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -21468,7 +20859,6 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { // result: (ANDconst [1< x z) y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SUB { break } @@ -21889,13 +21261,12 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool { // match: (SUB (SUB x y) z) // result: (SUB x (ADD y z)) for { - z := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SUB { break } y := v_0.Args[1] x := v_0.Args[0] + z := v_1 v.reset(OpARM64SUB) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type) @@ -21908,9 +21279,8 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (SUBshiftLL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { break } @@ -21929,9 +21299,8 
@@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (SUBshiftRL x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { break } @@ -21949,13 +21318,14 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SUB_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUB x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (SUBshiftRA x0 y [c]) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { break } @@ -21973,13 +21343,14 @@ func rewriteValueARM64_OpARM64SUB_10(v *Value) bool { return false } func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21989,7 +21360,6 @@ func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { // result: (MOVDconst [d-c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22002,7 +21372,6 @@ func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { // result: (ADDconst [-c-d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SUBconst { break } @@ -22017,7 +21386,6 @@ func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { // result: (ADDconst [-c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } @@ -22031,13 +21399,13 @@ func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SUBshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBshiftLL x (MOVDconst [c]) [d]) // result: (SUBconst x [int64(uint64(c)<>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -22090,9 +21456,7 @@ func rewriteValueARM64_OpARM64SUBshiftRA_0(v *Value) bool { // result: (MOVDconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRAconst { break } @@ -22107,13 +21471,13 @@ func rewriteValueARM64_OpARM64SUBshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64SUBshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBshiftRL x (MOVDconst [c]) [d]) // result: (SUBconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -22128,9 +21492,7 @@ func rewriteValueARM64_OpARM64SUBshiftRL_0(v *Value) bool { // result: (MOVDconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRLconst { break } @@ -22145,13 +21507,13 @@ func rewriteValueARM64_OpARM64SUBshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64TST_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (TST x (MOVDconst [c])) // result: (TSTconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -22167,10 +21529,9 @@ func rewriteValueARM64_OpARM64TST_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (TSTshiftLL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; 
_i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { continue } @@ -22191,10 +21552,9 @@ func rewriteValueARM64_OpARM64TST_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (TSTshiftRL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { continue } @@ -22215,10 +21575,9 @@ func rewriteValueARM64_OpARM64TST_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (TSTshiftRA x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { continue } @@ -22238,13 +21597,13 @@ func rewriteValueARM64_OpARM64TST_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64TSTW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (TSTW x (MOVDconst [c])) // result: (TSTWconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -22259,12 +21618,12 @@ func rewriteValueARM64_OpARM64TSTW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64TSTWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TSTWconst (MOVDconst [x]) [y]) // cond: int32(x&y)==0 // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22280,7 +21639,6 @@ func rewriteValueARM64_OpARM64TSTWconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22296,7 +21654,6 @@ func rewriteValueARM64_OpARM64TSTWconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22310,12 +21667,12 @@ func rewriteValueARM64_OpARM64TSTWconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (TSTconst (MOVDconst [x]) [y]) // cond: int64(x&y)==0 // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22331,7 +21688,6 @@ func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool { // result: (FlagLT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22347,7 +21703,6 @@ func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool { // result: (FlagGT_UGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -22361,17 +21716,18 @@ func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64TSTshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftLL (MOVDconst [c]) x [d]) // result: (TSTconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64TSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) @@ -22384,9 +21740,7 @@ func rewriteValueARM64_OpARM64TSTshiftLL_0(v *Value) bool { // result: (TSTconst x [int64(uint64(c)< x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64TSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, 
OpARM64SRAconst, x.Type) @@ -22422,9 +21777,7 @@ func rewriteValueARM64_OpARM64TSTshiftRA_0(v *Value) bool { // result: (TSTconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -22437,17 +21790,18 @@ func rewriteValueARM64_OpARM64TSTshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (TSTshiftRL (MOVDconst [c]) x [d]) // result: (TSTconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64TSTconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) @@ -22460,9 +21814,7 @@ func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool { // result: (TSTconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -22475,12 +21827,12 @@ func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool { + v_0 := v.Args[0] // match: (UBFIZ [bfc] (SLLconst [sc] x)) // cond: sc < getARM64BFwidth(bfc) // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) for { bfc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } @@ -22497,12 +21849,12 @@ func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool { + v_0 := v.Args[0] // match: (UBFX [bfc] (SRLconst [sc] x)) // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64 // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) for { bfc := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } @@ -22521,7 +21873,6 @@ func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool { // result: (ANDconst [1< x y) @@ -22697,8 +22038,8 @@ func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { if v.Type != typ.UInt64 { break } - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MSUB) v.Type = typ.UInt64 v.AddArg(x) @@ -22712,8 +22053,6 @@ func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { // match: (UMOD _ (MOVDconst [1])) // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { break } @@ -22725,9 +22064,7 @@ func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ANDconst [c-1] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -22743,13 +22080,10 @@ func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { // match: (UMOD (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [int64(uint64(c)%uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -22761,6 +22095,8 @@ func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (UMODW x y) @@ -22769,8 +22105,8 @@ func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { if v.Type != typ.UInt32 { break } - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MSUBW) v.Type = typ.UInt32 v.AddArg(x) @@ -22785,8 +22121,6 @@ func 
rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { // cond: uint32(c)==1 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -22802,9 +22136,7 @@ func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { // cond: isPowerOfTwo(c) && is32Bit(c) // result: (ANDconst [c-1] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -22820,13 +22152,10 @@ func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { // match: (UMODW (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [int64(uint32(c)%uint32(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } @@ -22838,15 +22167,15 @@ func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (XOR x (MOVDconst [c])) // result: (XORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MOVDconst { continue } @@ -22861,8 +22190,8 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // match: (XOR x x) // result: (MOVDconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpARM64MOVDconst) @@ -22872,10 +22201,8 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // match: (XOR x (MVN y)) // result: (EON x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpARM64MVN { continue } @@ -22891,10 +22218,9 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (XORshiftLL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SLLconst { continue } @@ -22915,10 +22241,9 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (XORshiftRL x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRLconst { continue } @@ -22939,10 +22264,9 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: clobberIfDead(x1) // result: (XORshiftRA x0 y [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] - x1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 if x1.Op != OpARM64SRAconst { continue } @@ -22963,9 +22287,7 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (ROR x (NEG y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SLL { continue } @@ -22980,7 +22302,6 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } @@ -23037,9 +22358,7 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // 
result: (ROR x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { continue } @@ -23054,7 +22373,6 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { continue } @@ -23109,9 +22427,7 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x (NEG y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SLL { continue } @@ -23126,7 +22442,6 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } @@ -23184,9 +22499,7 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { // cond: cc.(Op) == OpARM64LessThanU // result: (RORW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { continue } @@ -23205,7 +22518,6 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { continue } @@ -23259,13 +22571,14 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -23277,7 +22590,7 @@ func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpARM64MVN) v.AddArg(x) return true @@ -23286,7 +22599,6 @@ func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { // result: (MOVDconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } @@ -23299,7 +22611,6 @@ func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { // result: (XORconst [c^d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpARM64XORconst { break } @@ -23313,18 +22624,19 @@ func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (XORshiftLL (MOVDconst [c]) x [d]) // result: (XORconst [c] (SLLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64XORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) @@ -23337,9 +22649,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { // result: (XORconst x [int64(uint64(c)< [8] (UBFX [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 { + if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { break } - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) || x != v_0.Args[0] { + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARM64REV16W) @@ -23421,12 +22729,11 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { // result: 
(EXTRconst [64-c] x2 x) for { c := v.AuxInt - x2 := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c { break } x := v_0.Args[0] + x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = 64 - c v.AddArg(x2) @@ -23439,13 +22746,12 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { for { t := v.Type c := v.AuxInt - x2 := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64UBFX { break } bfc := v_0.AuxInt x := v_0.Args[0] + x2 := v_1 if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } @@ -23458,17 +22764,18 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64XORshiftRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRA (MOVDconst [c]) x [d]) // result: (XORconst [c] (SRAconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64XORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) @@ -23481,9 +22788,7 @@ func rewriteValueARM64_OpARM64XORshiftRA_0(v *Value) bool { // result: (XORconst x [c>>uint64(d)]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -23498,9 +22803,7 @@ func rewriteValueARM64_OpARM64XORshiftRA_0(v *Value) bool { // result: (MOVDconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRAconst { break } @@ -23515,17 +22818,18 @@ func rewriteValueARM64_OpARM64XORshiftRA_0(v *Value) bool { return false } func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORshiftRL (MOVDconst [c]) x [d]) // result: (XORconst [c] (SRLconst x [d])) for { d := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpARM64XORconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) @@ -23538,9 +22842,7 @@ func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { // result: (XORconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -23555,9 +22857,7 @@ func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { // result: (MOVDconst [0]) for { d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64SRLconst { break } @@ -23573,9 +22873,11 @@ func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { // result: (RORconst [ c] x) for { c := v.AuxInt - x := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c || x != v_0.Args[0] { + if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c { + break + } + x := v_0.Args[0] + if x != v_1 { break } v.reset(OpARM64RORconst) @@ -23589,13 +22891,10 @@ func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { for { t := v.Type c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c { break } x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) { break } @@ -23607,21 +22906,24 @@ func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { return false } func rewriteValueARM64_OpAbs_0(v *Value) bool { + v_0 := v.Args[0] // match: (Abs x) // result: (FABSD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FABSD) v.AddArg(x) return true } } func 
rewriteValueARM64_OpAdd16_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add16 x y)
 	// result: (ADD x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64ADD)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23629,11 +22931,13 @@ func rewriteValueARM64_OpAdd16_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAdd32_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add32 x y)
 	// result: (ADD x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64ADD)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23641,11 +22945,13 @@ func rewriteValueARM64_OpAdd32_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAdd32F_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add32F x y)
 	// result: (FADDS x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64FADDS)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23653,11 +22959,13 @@ func rewriteValueARM64_OpAdd32F_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAdd64_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add64 x y)
 	// result: (ADD x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64ADD)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23665,11 +22973,13 @@ func rewriteValueARM64_OpAdd64_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAdd64F_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add64F x y)
 	// result: (FADDD x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64FADDD)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23677,11 +22987,13 @@ func rewriteValueARM64_OpAdd64F_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAdd8_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add8 x y)
 	// result: (ADD x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64ADD)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23689,11 +23001,13 @@ func rewriteValueARM64_OpAdd8_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAddPtr_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AddPtr x y)
 	// result: (ADD x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64ADD)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23701,11 +23015,12 @@ func rewriteValueARM64_OpAddPtr_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAddr_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Addr {sym} base)
 	// result: (MOVDaddr {sym} base)
 	for {
 		sym := v.Aux
-		base := v.Args[0]
+		base := v_0
 		v.reset(OpARM64MOVDaddr)
 		v.Aux = sym
 		v.AddArg(base)
@@ -23713,11 +23028,13 @@ func rewriteValueARM64_OpAddr_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAnd16_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (And16 x y)
 	// result: (AND x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64AND)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23725,11 +23042,13 @@ func rewriteValueARM64_OpAnd16_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAnd32_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (And32 x y)
 	// result: (AND x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64AND)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23737,11 +23056,13 @@ func rewriteValueARM64_OpAnd32_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAnd64_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (And64 x y)
 	// result: (AND x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64AND)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23749,11 +23070,13 @@ func rewriteValueARM64_OpAnd64_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAnd8_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (And8 x y)
 	// result: (AND x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64AND)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23761,11 +23084,13 @@ func rewriteValueARM64_OpAnd8_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAndB_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AndB x y)
 	// result: (AND x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpARM64AND)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -23773,12 +23098,15 @@ func rewriteValueARM64_OpAndB_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAtomicAdd32_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AtomicAdd32 ptr val mem)
 	// result: (LoweredAtomicAdd32 ptr val mem)
 	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
+		ptr := v_0
+		val := v_1
+		mem := v_2
 		v.reset(OpARM64LoweredAtomicAdd32)
 		v.AddArg(ptr)
 		v.AddArg(val)
@@ -23787,12 +23115,15 @@ func rewriteValueARM64_OpAtomicAdd32_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAtomicAdd32Variant_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AtomicAdd32Variant ptr val mem)
 	// result: (LoweredAtomicAdd32Variant ptr val mem)
 	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
+		ptr := v_0
+		val := v_1
+		mem := v_2
 		v.reset(OpARM64LoweredAtomicAdd32Variant)
 		v.AddArg(ptr)
 		v.AddArg(val)
@@ -23801,12 +23132,15 @@ func rewriteValueARM64_OpAtomicAdd32Variant_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAtomicAdd64_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AtomicAdd64 ptr val mem)
 	// result: (LoweredAtomicAdd64 ptr val mem)
 	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
+		ptr := v_0
+		val := v_1
+		mem := v_2
 		v.reset(OpARM64LoweredAtomicAdd64)
 		v.AddArg(ptr)
 		v.AddArg(val)
@@ -23815,12 +23149,15 @@ func rewriteValueARM64_OpAtomicAdd64_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAtomicAdd64Variant_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AtomicAdd64Variant ptr val mem)
 	// result: (LoweredAtomicAdd64Variant ptr val mem)
 	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
+		ptr := v_0
+		val := v_1
+		mem := v_2
 		v.reset(OpARM64LoweredAtomicAdd64Variant)
 		v.AddArg(ptr)
 		v.AddArg(val)
@@ -23829,14 +23166,17 @@ func rewriteValueARM64_OpAtomicAdd64Variant_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAtomicAnd8_0(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (AtomicAnd8 ptr val mem)
 	// result: (Select1 (LoweredAtomicAnd8 ptr val mem))
 	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
+		ptr := v_0
+		val := v_1
+		mem := v_2
 		v.reset(OpSelect1)
 		v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem))
 		v0.AddArg(ptr)
@@ -23847,13 +23187,17 @@ func rewriteValueARM64_OpAtomicAnd8_0(v *Value) bool {
 	}
 }
 func rewriteValueARM64_OpAtomicCompareAndSwap32_0(v *Value) bool {
+	v_3 := v.Args[3]
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
 	// result: (LoweredAtomicCas32 ptr old new_ mem)
 	for {
-		mem := v.Args[3]
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
+		ptr := v_0
+		old := v_1
+		new_ := v_2
+		mem := v_3
 		v.reset(OpARM64LoweredAtomicCas32)
 		v.AddArg(ptr)
 		v.AddArg(old)
@@ -23863,13 +23207,17 @@ func rewriteValueARM64_OpAtomicCompareAndSwap32_0(v *Value) bool {
 	}
 }
 func 
rewriteValueARM64_OpAtomicCompareAndSwap64_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap64 ptr old new_ mem) // result: (LoweredAtomicCas64 ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpARM64LoweredAtomicCas64) v.AddArg(ptr) v.AddArg(old) @@ -23879,12 +23227,15 @@ func rewriteValueARM64_OpAtomicCompareAndSwap64_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicExchange32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange32 ptr val mem) // result: (LoweredAtomicExchange32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpARM64LoweredAtomicExchange32) v.AddArg(ptr) v.AddArg(val) @@ -23893,12 +23244,15 @@ func rewriteValueARM64_OpAtomicExchange32_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicExchange64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange64 ptr val mem) // result: (LoweredAtomicExchange64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpARM64LoweredAtomicExchange64) v.AddArg(ptr) v.AddArg(val) @@ -23907,11 +23261,13 @@ func rewriteValueARM64_OpAtomicExchange64_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicLoad32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad32 ptr mem) // result: (LDARW ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64LDARW) v.AddArg(ptr) v.AddArg(mem) @@ -23919,11 +23275,13 @@ func rewriteValueARM64_OpAtomicLoad32_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicLoad64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad64 ptr mem) // result: (LDAR ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64LDAR) v.AddArg(ptr) v.AddArg(mem) @@ -23931,11 +23289,13 @@ func rewriteValueARM64_OpAtomicLoad64_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicLoad8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad8 ptr mem) // result: (LDARB ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64LDARB) v.AddArg(ptr) v.AddArg(mem) @@ -23943,11 +23303,13 @@ func rewriteValueARM64_OpAtomicLoad8_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicLoadPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadPtr ptr mem) // result: (LDAR ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64LDAR) v.AddArg(ptr) v.AddArg(mem) @@ -23955,14 +23317,17 @@ func rewriteValueARM64_OpAtomicLoadPtr_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicOr8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicOr8 ptr val mem) // result: (Select1 (LoweredAtomicOr8 ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem)) v0.AddArg(ptr) @@ -23973,12 +23338,15 @@ func rewriteValueARM64_OpAtomicOr8_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore32 ptr val mem) // result: (STLRW ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpARM64STLRW) v.AddArg(ptr) v.AddArg(val) @@ -23987,12 +23355,15 @@ func rewriteValueARM64_OpAtomicStore32_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicStore64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore64 ptr val mem) // result: (STLR ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpARM64STLR) v.AddArg(ptr) v.AddArg(val) @@ -24001,12 +23372,15 @@ func rewriteValueARM64_OpAtomicStore64_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicStore8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore8 ptr val mem) // result: (STLRB ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpARM64STLRB) v.AddArg(ptr) v.AddArg(val) @@ -24015,12 +23389,15 @@ func rewriteValueARM64_OpAtomicStore8_0(v *Value) bool { } } func rewriteValueARM64_OpAtomicStorePtrNoWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStorePtrNoWB ptr val mem) // result: (STLR ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpARM64STLR) v.AddArg(ptr) v.AddArg(val) @@ -24029,13 +23406,15 @@ func rewriteValueARM64_OpAtomicStorePtrNoWB_0(v *Value) bool { } } func rewriteValueARM64_OpAvg64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Avg64u x y) // result: (ADD (SRLconst (SUB x y) [1]) y) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t) v0.AuxInt = 1 @@ -24049,12 +23428,13 @@ func rewriteValueARM64_OpAvg64u_0(v *Value) bool { } } func rewriteValueARM64_OpBitLen32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen32 x) // result: (SUB (MOVDconst [32]) (CLZW x)) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 32 @@ -24066,12 +23446,13 @@ func rewriteValueARM64_OpBitLen32_0(v *Value) bool { } } func rewriteValueARM64_OpBitLen64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) // result: (SUB (MOVDconst [64]) (CLZ x)) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 64 @@ -24083,12 +23464,13 @@ func rewriteValueARM64_OpBitLen64_0(v *Value) bool { } } func rewriteValueARM64_OpBitRev16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitRev16 x) // result: (SRLconst [48] (RBIT x)) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SRLconst) v.AuxInt = 48 v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) @@ -24098,32 +23480,35 @@ func rewriteValueARM64_OpBitRev16_0(v *Value) bool { } } func rewriteValueARM64_OpBitRev32_0(v *Value) bool { + v_0 := v.Args[0] // match: (BitRev32 x) // result: (RBITW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64RBITW) v.AddArg(x) return true } } func rewriteValueARM64_OpBitRev64_0(v *Value) bool { + v_0 := v.Args[0] // match: (BitRev64 x) // result: (RBIT x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64RBIT) v.AddArg(x) 
return true } } func rewriteValueARM64_OpBitRev8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitRev8 x) // result: (SRLconst [56] (RBIT x)) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SRLconst) v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) @@ -24133,43 +23518,49 @@ func rewriteValueARM64_OpBitRev8_0(v *Value) bool { } } func rewriteValueARM64_OpBswap32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Bswap32 x) // result: (REVW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64REVW) v.AddArg(x) return true } } func rewriteValueARM64_OpBswap64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Bswap64 x) // result: (REV x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64REV) v.AddArg(x) return true } } func rewriteValueARM64_OpCeil_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ceil x) // result: (FRINTPD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FRINTPD) v.AddArg(x) return true } } func rewriteValueARM64_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpARM64CALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -24179,54 +23570,61 @@ func rewriteValueARM64_OpClosureCall_0(v *Value) bool { } } func rewriteValueARM64_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MVN) v.AddArg(x) return true } } func rewriteValueARM64_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MVN) v.AddArg(x) return true } } func rewriteValueARM64_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com64 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MVN) v.AddArg(x) return true } } func rewriteValueARM64_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (MVN x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MVN) v.AddArg(x) return true } } func rewriteValueARM64_OpCondSelect_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CondSelect x y boolval) // cond: flagArg(boolval) != nil // result: (CSEL {boolval.Op} x y flagArg(boolval)) for { - boolval := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + boolval := v_2 if !(flagArg(boolval) != nil) { break } @@ -24241,9 +23639,9 @@ func rewriteValueARM64_OpCondSelect_0(v *Value) bool { // cond: flagArg(boolval) == nil // result: (CSEL {OpARM64NotEqual} x y (CMPWconst [0] boolval)) for { - boolval := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + boolval := v_2 if !(flagArg(boolval) == nil) { break } @@ -24339,13 +23737,14 @@ func rewriteValueARM64_OpConstNil_0(v *Value) bool { } } func rewriteValueARM64_OpCtz16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) // result: (CLZW (RBITW (ORconst [0x10000] x))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64CLZW) v.Type = t v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32) @@ -24358,22 +23757,24 @@ func rewriteValueARM64_OpCtz16_0(v *Value) bool { } } func rewriteValueARM64_OpCtz16NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz16NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] 
+ x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueARM64_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Ctz32 x) // result: (CLZW (RBITW x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64CLZW) v0 := b.NewValue0(v.Pos, OpARM64RBITW, t) v0.AddArg(x) @@ -24382,22 +23783,24 @@ func rewriteValueARM64_OpCtz32_0(v *Value) bool { } } func rewriteValueARM64_OpCtz32NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz32NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueARM64_OpCtz64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Ctz64 x) // result: (CLZ (RBIT x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64CLZ) v0 := b.NewValue0(v.Pos, OpARM64RBIT, t) v0.AddArg(x) @@ -24406,23 +23809,25 @@ func rewriteValueARM64_OpCtz64_0(v *Value) bool { } } func rewriteValueARM64_OpCtz64NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz64NonZero x) // result: (Ctz64 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz64) v.AddArg(x) return true } } func rewriteValueARM64_OpCtz8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) // result: (CLZW (RBITW (ORconst [0x100] x))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64CLZW) v.Type = t v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32) @@ -24435,203 +23840,224 @@ func rewriteValueARM64_OpCtz8_0(v *Value) bool { } } func rewriteValueARM64_OpCtz8NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz8NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (FCVTZSSW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZSSW) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Fto32U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32U x) // result: (FCVTZUSW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZUSW) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64 x) // result: (FCVTZSS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZSS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (FCVTSD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTSD) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Fto64U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64U x) // result: (FCVTZUS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZUS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Uto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Uto32F x) // result: (UCVTFWS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64UCVTFWS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32Uto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Uto64F x) // result: (UCVTFWD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64UCVTFWD) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (SCVTFWS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SCVTFWS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // result: (SCVTFWD x) for { - x := v.Args[0] + x := v_0 
v.reset(OpARM64SCVTFWD) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (FCVTZSDW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZSDW) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (FCVTDS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTDS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Fto32U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32U x) // result: (FCVTZUDW x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZUDW) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64 x) // result: (FCVTZSD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZSD) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Fto64U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64U x) // result: (FCVTZUD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FCVTZUD) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Uto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Uto32F x) // result: (UCVTFS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64UCVTFS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64Uto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Uto64F x) // result: (UCVTFD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64UCVTFD) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to32F x) // result: (SCVTFS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SCVTFS) v.AddArg(x) return true } } func rewriteValueARM64_OpCvt64to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to64F x) // result: (SCVTFD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64SCVTFD) v.AddArg(x) return true } } func rewriteValueARM64_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -24643,13 +24069,15 @@ func rewriteValueARM64_OpDiv16_0(v *Value) bool { } } func rewriteValueARM64_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UDIVW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -24661,11 +24089,13 @@ func rewriteValueARM64_OpDiv16u_0(v *Value) bool { } } func rewriteValueARM64_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32 x y) // result: (DIVW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64DIVW) v.AddArg(x) v.AddArg(y) @@ -24673,11 +24103,13 @@ func rewriteValueARM64_OpDiv32_0(v *Value) bool { } } func rewriteValueARM64_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (FDIVS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64FDIVS) v.AddArg(x) v.AddArg(y) @@ -24685,11 +24117,13 @@ func rewriteValueARM64_OpDiv32F_0(v *Value) bool { } } func rewriteValueARM64_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32u x 
y) // result: (UDIVW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UDIVW) v.AddArg(x) v.AddArg(y) @@ -24697,11 +24131,13 @@ func rewriteValueARM64_OpDiv32u_0(v *Value) bool { } } func rewriteValueARM64_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64 x y) // result: (DIV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64DIV) v.AddArg(x) v.AddArg(y) @@ -24709,11 +24145,13 @@ func rewriteValueARM64_OpDiv64_0(v *Value) bool { } } func rewriteValueARM64_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (FDIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64FDIVD) v.AddArg(x) v.AddArg(y) @@ -24721,11 +24159,13 @@ func rewriteValueARM64_OpDiv64F_0(v *Value) bool { } } func rewriteValueARM64_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64u x y) // result: (UDIV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UDIV) v.AddArg(x) v.AddArg(y) @@ -24733,13 +24173,15 @@ func rewriteValueARM64_OpDiv64u_0(v *Value) bool { } } func rewriteValueARM64_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -24751,13 +24193,15 @@ func rewriteValueARM64_OpDiv8_0(v *Value) bool { } } func rewriteValueARM64_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UDIVW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -24769,13 +24213,15 @@ func rewriteValueARM64_OpDiv8u_0(v *Value) bool { } } func rewriteValueARM64_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -24789,12 +24235,14 @@ func rewriteValueARM64_OpEq16_0(v *Value) bool { } } func rewriteValueARM64_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32 x y) // result: (Equal (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -24804,12 +24252,14 @@ func rewriteValueARM64_OpEq32_0(v *Value) bool { } } func rewriteValueARM64_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (Equal (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) v0.AddArg(x) @@ -24819,12 +24269,14 @@ func rewriteValueARM64_OpEq32F_0(v *Value) bool { } } func rewriteValueARM64_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64 x y) // result: (Equal (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, 
OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -24834,12 +24286,14 @@ func rewriteValueARM64_OpEq64_0(v *Value) bool { } } func rewriteValueARM64_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (Equal (FCMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) v0.AddArg(x) @@ -24849,13 +24303,15 @@ func rewriteValueARM64_OpEq64F_0(v *Value) bool { } } func rewriteValueARM64_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -24869,13 +24325,15 @@ func rewriteValueARM64_OpEq8_0(v *Value) bool { } } func rewriteValueARM64_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (XOR (MOVDconst [1]) (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64XOR) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 1 @@ -24888,12 +24346,14 @@ func rewriteValueARM64_OpEqB_0(v *Value) bool { } } func rewriteValueARM64_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqPtr x y) // result: (Equal (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -24903,12 +24363,15 @@ func rewriteValueARM64_OpEqPtr_0(v *Value) bool { } } func rewriteValueARM64_OpFMA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMA x y z) // result: (FMADDD z x y) for { - z := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + z := v_2 v.reset(OpARM64FMADDD) v.AddArg(z) v.AddArg(x) @@ -24917,23 +24380,26 @@ func rewriteValueARM64_OpFMA_0(v *Value) bool { } } func rewriteValueARM64_OpFloor_0(v *Value) bool { + v_0 := v.Args[0] // match: (Floor x) // result: (FRINTMD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FRINTMD) v.AddArg(x) return true } } func rewriteValueARM64_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -24947,13 +24413,15 @@ func rewriteValueARM64_OpGeq16_0(v *Value) bool { } } func rewriteValueARM64_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -24967,12 +24435,14 @@ func rewriteValueARM64_OpGeq16U_0(v *Value) bool { } } func rewriteValueARM64_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32 x y) // result: (GreaterEqual (CMPW x y)) for { - y := v.Args[1] - x := 
v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -24982,12 +24452,14 @@ func rewriteValueARM64_OpGeq32_0(v *Value) bool { } } func rewriteValueARM64_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (GreaterEqualF (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) v0.AddArg(x) @@ -24997,12 +24469,14 @@ func rewriteValueARM64_OpGeq32F_0(v *Value) bool { } } func rewriteValueARM64_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32U x y) // result: (GreaterEqualU (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25012,12 +24486,14 @@ func rewriteValueARM64_OpGeq32U_0(v *Value) bool { } } func rewriteValueARM64_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64 x y) // result: (GreaterEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25027,12 +24503,14 @@ func rewriteValueARM64_OpGeq64_0(v *Value) bool { } } func rewriteValueARM64_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (GreaterEqualF (FCMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) v0.AddArg(x) @@ -25042,12 +24520,14 @@ func rewriteValueARM64_OpGeq64F_0(v *Value) bool { } } func rewriteValueARM64_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64U x y) // result: (GreaterEqualU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25057,13 +24537,15 @@ func rewriteValueARM64_OpGeq64U_0(v *Value) bool { } } func rewriteValueARM64_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -25077,13 +24559,15 @@ func rewriteValueARM64_OpGeq8_0(v *Value) bool { } } func rewriteValueARM64_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -25121,13 +24605,15 @@ func rewriteValueARM64_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueARM64_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThan) v0 := 
b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -25141,13 +24627,15 @@ func rewriteValueARM64_OpGreater16_0(v *Value) bool { } } func rewriteValueARM64_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -25161,12 +24649,14 @@ func rewriteValueARM64_OpGreater16U_0(v *Value) bool { } } func rewriteValueARM64_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32 x y) // result: (GreaterThan (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25176,12 +24666,14 @@ func rewriteValueARM64_OpGreater32_0(v *Value) bool { } } func rewriteValueARM64_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (GreaterThanF (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) v0.AddArg(x) @@ -25191,12 +24683,14 @@ func rewriteValueARM64_OpGreater32F_0(v *Value) bool { } } func rewriteValueARM64_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32U x y) // result: (GreaterThanU (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25206,12 +24700,14 @@ func rewriteValueARM64_OpGreater32U_0(v *Value) bool { } } func rewriteValueARM64_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64 x y) // result: (GreaterThan (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25221,12 +24717,14 @@ func rewriteValueARM64_OpGreater64_0(v *Value) bool { } } func rewriteValueARM64_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (GreaterThanF (FCMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) v0.AddArg(x) @@ -25236,12 +24734,14 @@ func rewriteValueARM64_OpGreater64F_0(v *Value) bool { } } func rewriteValueARM64_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64U x y) // result: (GreaterThanU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25251,13 +24751,15 @@ func rewriteValueARM64_OpGreater64U_0(v *Value) bool { } } func rewriteValueARM64_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := 
b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -25271,13 +24773,15 @@ func rewriteValueARM64_OpGreater8_0(v *Value) bool { } } func rewriteValueARM64_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64GreaterThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -25291,13 +24795,15 @@ func rewriteValueARM64_OpGreater8U_0(v *Value) bool { } } func rewriteValueARM64_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32 x y) // result: (SRAconst (MULL x y) [32]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRAconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64) @@ -25308,13 +24814,15 @@ func rewriteValueARM64_OpHmul32_0(v *Value) bool { } } func rewriteValueARM64_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32u x y) // result: (SRAconst (UMULL x y) [32]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRAconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64) @@ -25325,11 +24833,13 @@ func rewriteValueARM64_OpHmul32u_0(v *Value) bool { } } func rewriteValueARM64_OpHmul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64 x y) // result: (MULH x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MULH) v.AddArg(x) v.AddArg(y) @@ -25337,11 +24847,13 @@ func rewriteValueARM64_OpHmul64_0(v *Value) bool { } } func rewriteValueARM64_OpHmul64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64u x y) // result: (UMULH x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UMULH) v.AddArg(x) v.AddArg(y) @@ -25349,12 +24861,14 @@ func rewriteValueARM64_OpHmul64u_0(v *Value) bool { } } func rewriteValueARM64_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpARM64CALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -25363,12 +24877,14 @@ func rewriteValueARM64_OpInterCall_0(v *Value) bool { } } func rewriteValueARM64_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsInBounds idx len) // result: (LessThanU (CMP idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(idx) @@ -25378,11 +24894,12 @@ func rewriteValueARM64_OpIsInBounds_0(v *Value) bool { } } func rewriteValueARM64_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (IsNonNil ptr) // result: (NotEqual (CMPconst [0] ptr)) for { - ptr := v.Args[0] + ptr := v_0 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = 0 @@ -25392,12 +24909,14 @@ func rewriteValueARM64_OpIsNonNil_0(v *Value) bool { } } func rewriteValueARM64_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsSliceInBounds idx len) // result: (LessEqualU (CMP idx len)) for { - len := v.Args[1] - 
idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(idx) @@ -25407,13 +24926,15 @@ func rewriteValueARM64_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueARM64_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -25427,13 +24948,15 @@ func rewriteValueARM64_OpLeq16_0(v *Value) bool { } } func rewriteValueARM64_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -25447,12 +24970,14 @@ func rewriteValueARM64_OpLeq16U_0(v *Value) bool { } } func rewriteValueARM64_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32 x y) // result: (LessEqual (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25462,12 +24987,14 @@ func rewriteValueARM64_OpLeq32_0(v *Value) bool { } } func rewriteValueARM64_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) // result: (LessEqualF (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) v0.AddArg(x) @@ -25477,12 +25004,14 @@ func rewriteValueARM64_OpLeq32F_0(v *Value) bool { } } func rewriteValueARM64_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32U x y) // result: (LessEqualU (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25492,12 +25021,14 @@ func rewriteValueARM64_OpLeq32U_0(v *Value) bool { } } func rewriteValueARM64_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64 x y) // result: (LessEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25507,12 +25038,14 @@ func rewriteValueARM64_OpLeq64_0(v *Value) bool { } } func rewriteValueARM64_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (LessEqualF (FCMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqualF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) v0.AddArg(x) @@ -25522,12 +25055,14 @@ func rewriteValueARM64_OpLeq64F_0(v *Value) bool { } } func rewriteValueARM64_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64U x y) // result: (LessEqualU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25537,13 +25072,15 @@ func 
rewriteValueARM64_OpLeq64U_0(v *Value) bool { } } func rewriteValueARM64_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -25557,13 +25094,15 @@ func rewriteValueARM64_OpLeq8_0(v *Value) bool { } } func rewriteValueARM64_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessEqualU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -25577,13 +25116,15 @@ func rewriteValueARM64_OpLeq8U_0(v *Value) bool { } } func rewriteValueARM64_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -25597,13 +25138,15 @@ func rewriteValueARM64_OpLess16_0(v *Value) bool { } } func rewriteValueARM64_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -25617,12 +25160,14 @@ func rewriteValueARM64_OpLess16U_0(v *Value) bool { } } func rewriteValueARM64_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32 x y) // result: (LessThan (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25632,12 +25177,14 @@ func rewriteValueARM64_OpLess32_0(v *Value) bool { } } func rewriteValueARM64_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (LessThanF (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) v0.AddArg(x) @@ -25647,12 +25194,14 @@ func rewriteValueARM64_OpLess32F_0(v *Value) bool { } } func rewriteValueARM64_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32U x y) // result: (LessThanU (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -25662,12 +25211,14 @@ func rewriteValueARM64_OpLess32U_0(v *Value) bool { } } func rewriteValueARM64_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64 x y) // result: (LessThan (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25677,12 +25228,14 @@ 
func rewriteValueARM64_OpLess64_0(v *Value) bool { } } func rewriteValueARM64_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (LessThanF (FCMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThanF) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) v0.AddArg(x) @@ -25692,12 +25245,14 @@ func rewriteValueARM64_OpLess64F_0(v *Value) bool { } } func rewriteValueARM64_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64U x y) // result: (LessThanU (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -25707,13 +25262,15 @@ func rewriteValueARM64_OpLess64U_0(v *Value) bool { } } func rewriteValueARM64_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -25727,13 +25284,15 @@ func rewriteValueARM64_OpLess8_0(v *Value) bool { } } func rewriteValueARM64_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -25747,13 +25306,15 @@ func rewriteValueARM64_OpLess8U_0(v *Value) bool { } } func rewriteValueARM64_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: t.IsBoolean() // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean()) { break } @@ -25767,8 +25328,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -25782,8 +25343,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && !isSigned(t)) { break } @@ -25797,8 +25358,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -25812,8 +25373,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (MOVHUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -25827,8 +25388,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && isSigned(t)) { break } @@ -25842,8 +25403,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (MOVWUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && !isSigned(t)) { break } @@ -25857,8 +25418,8 @@ func rewriteValueARM64_OpLoad_0(v 
*Value) bool { // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) || isPtr(t)) { break } @@ -25872,8 +25433,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (FMOVSload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -25887,8 +25448,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { // result: (FMOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -25900,12 +25461,12 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool { return false } func rewriteValueARM64_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVDaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpARM64MOVDaddr) v.Aux = sym v.AddArg(base) @@ -25913,14 +25474,16 @@ func rewriteValueARM64_OpLocalAddr_0(v *Value) bool { } } func rewriteValueARM64_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -25942,14 +25505,16 @@ func rewriteValueARM64_OpLsh16x16_0(v *Value) bool { } } func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -25971,13 +25536,15 @@ func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { } } func rewriteValueARM64_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x64 x y) // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -25995,14 +25562,16 @@ func rewriteValueARM64_OpLsh16x64_0(v *Value) bool { } } func rewriteValueARM64_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26024,14 +25593,16 @@ func rewriteValueARM64_OpLsh16x8_0(v *Value) bool { } } func rewriteValueARM64_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26053,14 +25624,16 @@ func rewriteValueARM64_OpLsh32x16_0(v *Value) bool { } } func 
rewriteValueARM64_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26082,13 +25655,15 @@ func rewriteValueARM64_OpLsh32x32_0(v *Value) bool { } } func rewriteValueARM64_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x64 x y) // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26106,14 +25681,16 @@ func rewriteValueARM64_OpLsh32x64_0(v *Value) bool { } } func rewriteValueARM64_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26135,14 +25712,16 @@ func rewriteValueARM64_OpLsh32x8_0(v *Value) bool { } } func rewriteValueARM64_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26164,14 +25743,16 @@ func rewriteValueARM64_OpLsh64x16_0(v *Value) bool { } } func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26193,13 +25774,15 @@ func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { } } func rewriteValueARM64_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x64 x y) // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26217,14 +25800,16 @@ func rewriteValueARM64_OpLsh64x64_0(v *Value) bool { } } func rewriteValueARM64_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26246,14 +25831,16 @@ func rewriteValueARM64_OpLsh64x8_0(v *Value) bool { } } func rewriteValueARM64_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types // match: (Lsh8x16 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26275,14 +25862,16 @@ func rewriteValueARM64_OpLsh8x16_0(v *Value) bool { } } func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26304,13 +25893,15 @@ func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { } } func rewriteValueARM64_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x64 x y) // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26328,14 +25919,16 @@ func rewriteValueARM64_OpLsh8x64_0(v *Value) bool { } } func rewriteValueARM64_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SLL, t) @@ -26357,13 +25950,15 @@ func rewriteValueARM64_OpLsh8x8_0(v *Value) bool { } } func rewriteValueARM64_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (MODW (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MODW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -26375,13 +25970,15 @@ func rewriteValueARM64_OpMod16_0(v *Value) bool { } } func rewriteValueARM64_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UMODW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -26393,11 +25990,13 @@ func rewriteValueARM64_OpMod16u_0(v *Value) bool { } } func rewriteValueARM64_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod32 x y) // result: (MODW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MODW) v.AddArg(x) v.AddArg(y) @@ -26405,11 +26004,13 @@ func rewriteValueARM64_OpMod32_0(v *Value) bool { } } func rewriteValueARM64_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod32u x y) // result: (UMODW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UMODW) v.AddArg(x) v.AddArg(y) @@ -26417,11 +26018,13 @@ func rewriteValueARM64_OpMod32u_0(v *Value) bool { } } func rewriteValueARM64_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64 x y) // result: (MOD x y) for { - y := v.Args[1] - x := v.Args[0] + x := 
v_0 + y := v_1 v.reset(OpARM64MOD) v.AddArg(x) v.AddArg(y) @@ -26429,11 +26032,13 @@ func rewriteValueARM64_OpMod64_0(v *Value) bool { } } func rewriteValueARM64_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64u x y) // result: (UMOD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UMOD) v.AddArg(x) v.AddArg(y) @@ -26441,13 +26046,15 @@ func rewriteValueARM64_OpMod64u_0(v *Value) bool { } } func rewriteValueARM64_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (MODW (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MODW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -26459,13 +26066,15 @@ func rewriteValueARM64_OpMod8_0(v *Value) bool { } } func rewriteValueARM64_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64UMODW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -26477,6 +26086,9 @@ func rewriteValueARM64_OpMod8u_0(v *Value) bool { } } func rewriteValueARM64_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -26485,7 +26097,7 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -26497,9 +26109,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) @@ -26515,9 +26127,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVHstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) @@ -26533,9 +26145,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVWstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) @@ -26551,9 +26163,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVDstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) @@ -26569,9 +26181,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -26596,9 +26208,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 4 v.AddArg(dst) @@ -26623,9 +26235,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 
v.reset(OpARM64MOVHstore) v.AuxInt = 4 v.AddArg(dst) @@ -26650,9 +26262,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = 6 v.AddArg(dst) @@ -26686,9 +26298,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { if v.AuxInt != 12 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = 8 v.AddArg(dst) @@ -26710,6 +26322,9 @@ func rewriteValueARM64_OpMove_0(v *Value) bool { return false } func rewriteValueARM64_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -26719,9 +26334,9 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = 8 v.AddArg(dst) @@ -26746,9 +26361,9 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { if v.AuxInt != 24 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = 16 v.AddArg(dst) @@ -26781,9 +26396,9 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { // result: (Move [s%8] (OffPtr dst [s-s%8]) (OffPtr src [s-s%8]) (Move [s-s%8] dst src mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s%8 != 0 && s > 8) { break } @@ -26810,9 +26425,9 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { // result: (MOVDstore [s-8] dst (MOVDload [s-8] src mem) (DUFFCOPY [8*(64-(s-8)/16)] dst src mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice) { break } @@ -26837,9 +26452,9 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) { break } @@ -26855,9 +26470,9 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { // result: (LoweredMove dst src (ADDconst src [s-8]) mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 24 && s%8 == 0) { break } @@ -26874,11 +26489,13 @@ func rewriteValueARM64_OpMove_10(v *Value) bool { return false } func rewriteValueARM64_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (MULW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MULW) v.AddArg(x) v.AddArg(y) @@ -26886,11 +26503,13 @@ func rewriteValueARM64_OpMul16_0(v *Value) bool { } } func rewriteValueARM64_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MULW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MULW) v.AddArg(x) v.AddArg(y) @@ -26898,11 +26517,13 @@ func rewriteValueARM64_OpMul32_0(v *Value) bool { } } func rewriteValueARM64_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (FMULS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64FMULS) v.AddArg(x) 
v.AddArg(y) @@ -26910,11 +26531,13 @@ func rewriteValueARM64_OpMul32F_0(v *Value) bool { } } func rewriteValueARM64_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MUL) v.AddArg(x) v.AddArg(y) @@ -26922,11 +26545,13 @@ func rewriteValueARM64_OpMul64_0(v *Value) bool { } } func rewriteValueARM64_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (FMULD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64FMULD) v.AddArg(x) v.AddArg(y) @@ -26934,11 +26559,13 @@ func rewriteValueARM64_OpMul64F_0(v *Value) bool { } } func rewriteValueARM64_OpMul64uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64uhilo x y) // result: (LoweredMuluhilo x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64LoweredMuluhilo) v.AddArg(x) v.AddArg(y) @@ -26946,11 +26573,13 @@ func rewriteValueARM64_OpMul64uhilo_0(v *Value) bool { } } func rewriteValueARM64_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (MULW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64MULW) v.AddArg(x) v.AddArg(y) @@ -26958,73 +26587,81 @@ func rewriteValueARM64_OpMul8_0(v *Value) bool { } } func rewriteValueARM64_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64NEG) v.AddArg(x) return true } } func rewriteValueARM64_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64NEG) v.AddArg(x) return true } } func rewriteValueARM64_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (FNEGS x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FNEGS) v.AddArg(x) return true } } func rewriteValueARM64_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64NEG) v.AddArg(x) return true } } func rewriteValueARM64_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (FNEGD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FNEGD) v.AddArg(x) return true } } func rewriteValueARM64_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64NEG) v.AddArg(x) return true } } func rewriteValueARM64_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -27038,12 +26675,14 @@ func rewriteValueARM64_OpNeq16_0(v *Value) bool { } } func rewriteValueARM64_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32 x y) // result: (NotEqual (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg(x) @@ -27053,12 +26692,14 @@ func rewriteValueARM64_OpNeq32_0(v *Value) bool { } } func rewriteValueARM64_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: 
(Neq32F x y) // result: (NotEqual (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) v0.AddArg(x) @@ -27068,12 +26709,14 @@ func rewriteValueARM64_OpNeq32F_0(v *Value) bool { } } func rewriteValueARM64_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64 x y) // result: (NotEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -27083,12 +26726,14 @@ func rewriteValueARM64_OpNeq64_0(v *Value) bool { } } func rewriteValueARM64_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64F x y) // result: (NotEqual (FCMPD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) v0.AddArg(x) @@ -27098,13 +26743,15 @@ func rewriteValueARM64_OpNeq64F_0(v *Value) bool { } } func rewriteValueARM64_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -27118,11 +26765,13 @@ func rewriteValueARM64_OpNeq8_0(v *Value) bool { } } func rewriteValueARM64_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64XOR) v.AddArg(x) v.AddArg(y) @@ -27130,12 +26779,14 @@ func rewriteValueARM64_OpNeqB_0(v *Value) bool { } } func rewriteValueARM64_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqPtr x y) // result: (NotEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg(x) @@ -27145,11 +26796,13 @@ func rewriteValueARM64_OpNeqPtr_0(v *Value) bool { } } func rewriteValueARM64_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64LoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -27157,12 +26810,13 @@ func rewriteValueARM64_OpNilCheck_0(v *Value) bool { } } func rewriteValueARM64_OpNot_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Not x) // result: (XOR (MOVDconst [1]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64XOR) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 1 @@ -27172,11 +26826,12 @@ func rewriteValueARM64_OpNot_0(v *Value) bool { } } func rewriteValueARM64_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr [off] ptr:(SP)) // result: (MOVDaddr [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if ptr.Op != OpSP { break } @@ -27189,7 +26844,7 @@ func rewriteValueARM64_OpOffPtr_0(v *Value) bool { // result: (ADDconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpARM64ADDconst) v.AuxInt = off v.AddArg(ptr) @@ -27197,11 +26852,13 @@ func rewriteValueARM64_OpOffPtr_0(v *Value) bool { } } func rewriteValueARM64_OpOr16_0(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64OR) v.AddArg(x) v.AddArg(y) @@ -27209,11 +26866,13 @@ func rewriteValueARM64_OpOr16_0(v *Value) bool { } } func rewriteValueARM64_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64OR) v.AddArg(x) v.AddArg(y) @@ -27221,11 +26880,13 @@ func rewriteValueARM64_OpOr32_0(v *Value) bool { } } func rewriteValueARM64_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or64 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64OR) v.AddArg(x) v.AddArg(y) @@ -27233,11 +26894,13 @@ func rewriteValueARM64_OpOr64_0(v *Value) bool { } } func rewriteValueARM64_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64OR) v.AddArg(x) v.AddArg(y) @@ -27245,11 +26908,13 @@ func rewriteValueARM64_OpOr8_0(v *Value) bool { } } func rewriteValueARM64_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64OR) v.AddArg(x) v.AddArg(y) @@ -27257,14 +26922,17 @@ func rewriteValueARM64_OpOrB_0(v *Value) bool { } } func rewriteValueARM64_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -27280,9 +26948,9 @@ func rewriteValueARM64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -27298,9 +26966,9 @@ func rewriteValueARM64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -27314,13 +26982,14 @@ func rewriteValueARM64_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueARM64_OpPopCount16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount16 x) // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt16to64 x))))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64FMOVDfpgp) v.Type = t v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) @@ -27336,13 +27005,14 @@ func rewriteValueARM64_OpPopCount16_0(v *Value) bool { } } func rewriteValueARM64_OpPopCount32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount32 x) // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt32to64 x))))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64FMOVDfpgp) v.Type = t v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) @@ -27358,13 +27028,14 @@ func rewriteValueARM64_OpPopCount32_0(v *Value) bool { } } func rewriteValueARM64_OpPopCount64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount64 x) // result: 
(FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp x)))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64FMOVDfpgp) v.Type = t v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) @@ -27378,15 +27049,15 @@ func rewriteValueARM64_OpPopCount64_0(v *Value) bool { } } func rewriteValueARM64_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (MOVDconst [c])) // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -27409,12 +27080,14 @@ func rewriteValueARM64_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueARM64_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RotateLeft32 x y) // result: (RORW x (NEG y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64RORW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) @@ -27424,12 +27097,14 @@ func rewriteValueARM64_OpRotateLeft32_0(v *Value) bool { } } func rewriteValueARM64_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (RotateLeft64 x y) // result: (ROR x (NEG y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64ROR) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) @@ -27439,15 +27114,15 @@ func rewriteValueARM64_OpRotateLeft64_0(v *Value) bool { } } func rewriteValueARM64_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVDconst [c])) // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpARM64MOVDconst { break } @@ -27470,54 +27145,60 @@ func rewriteValueARM64_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueARM64_OpRound_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round x) // result: (FRINTAD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FRINTAD) v.AddArg(x) return true } } func rewriteValueARM64_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: (LoweredRound32F x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64LoweredRound32F) v.AddArg(x) return true } } func rewriteValueARM64_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: (LoweredRound64F x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64LoweredRound64F) v.AddArg(x) return true } } func rewriteValueARM64_OpRoundToEven_0(v *Value) bool { + v_0 := v.Args[0] // match: (RoundToEven x) // result: (FRINTND x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FRINTND) v.AddArg(x) return true } } func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27541,14 +27222,16 @@ func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // result: 
(CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27572,14 +27255,16 @@ func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27599,14 +27284,16 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27630,13 +27317,15 @@ func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -27660,13 +27349,15 @@ func rewriteValueARM64_OpRsh16x16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -27690,13 +27381,15 @@ func rewriteValueARM64_OpRsh16x32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -27716,13 +27409,15 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -27746,14 +27441,16 @@ func rewriteValueARM64_OpRsh16x8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: 
(Rsh32Ux16 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27777,14 +27474,16 @@ func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27808,14 +27507,16 @@ func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27835,14 +27536,16 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -27866,13 +27569,15 @@ func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -27896,13 +27601,15 @@ func rewriteValueARM64_OpRsh32x16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -27926,13 +27633,15 @@ func rewriteValueARM64_OpRsh32x32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x y) // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -27952,13 +27661,15 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := 
v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -27982,14 +27693,16 @@ func rewriteValueARM64_OpRsh32x8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28011,14 +27724,16 @@ func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28040,13 +27755,15 @@ func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux64 x y) // result: (CSEL {OpARM64LessThanU} (SRL x y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28064,14 +27781,16 @@ func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28093,13 +27812,15 @@ func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) @@ -28121,13 +27842,15 @@ func rewriteValueARM64_OpRsh64x16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 x y) // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) @@ -28149,12 +27872,14 @@ func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 x y) // result: (SRA x (CSEL 
{OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) @@ -28172,13 +27897,15 @@ func rewriteValueARM64_OpRsh64x64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) @@ -28200,14 +27927,16 @@ func rewriteValueARM64_OpRsh64x8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28231,14 +27960,16 @@ func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28262,14 +27993,16 @@ func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28289,14 +28022,16 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64CSEL) v.Aux = OpARM64LessThanU v0 := b.NewValue0(v.Pos, OpARM64SRL, t) @@ -28320,13 +28055,15 @@ func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -28350,13 +28087,15 @@ func rewriteValueARM64_OpRsh8x16_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // result: (SRA (SignExt8to64 x) (CSEL 
{OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -28380,13 +28119,15 @@ func rewriteValueARM64_OpRsh8x32_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -28406,13 +28147,15 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool { } } func rewriteValueARM64_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -28436,12 +28179,12 @@ func rewriteValueARM64_OpRsh8x8_0(v *Value) bool { } } func rewriteValueARM64_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select0 (Add64carry x y c)) // result: (Select0 (ADCSflags x y (Select1 (ADDSconstflags [-1] c)))) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64carry { break } @@ -28465,7 +28208,6 @@ func rewriteValueARM64_OpSelect0_0(v *Value) bool { // match: (Select0 (Sub64borrow x y bo)) // result: (Select0 (SBCSflags x y (Select1 (NEGSflags bo)))) for { - v_0 := v.Args[0] if v_0.Op != OpSub64borrow { break } @@ -28488,12 +28230,12 @@ func rewriteValueARM64_OpSelect0_0(v *Value) bool { return false } func rewriteValueARM64_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select1 (Add64carry x y c)) // result: (ADCzerocarry (Select1 (ADCSflags x y (Select1 (ADDSconstflags [-1] c))))) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64carry { break } @@ -28519,7 +28261,6 @@ func rewriteValueARM64_OpSelect1_0(v *Value) bool { // match: (Select1 (Sub64borrow x y bo)) // result: (NEG (NGCzerocarry (Select1 (SBCSflags x y (Select1 (NEGSflags bo)))))) for { - v_0 := v.Args[0] if v_0.Op != OpSub64borrow { break } @@ -28546,72 +28287,79 @@ func rewriteValueARM64_OpSelect1_0(v *Value) bool { return false } func rewriteValueARM64_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVHreg) v.AddArg(x) return true } } func rewriteValueARM64_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to64 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVHreg) v.AddArg(x) return true } } func rewriteValueARM64_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt32to64 x) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVWreg) v.AddArg(x) return true } } func rewriteValueARM64_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVBreg) v.AddArg(x) return true } } func rewriteValueARM64_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBreg x) 
for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVBreg) v.AddArg(x) return true } } func rewriteValueARM64_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to64 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVBreg) v.AddArg(x) return true } } func rewriteValueARM64_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SRAconst (NEG x) [63]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpARM64SRAconst) v.AuxInt = 63 v0 := b.NewValue0(v.Pos, OpARM64NEG, t) @@ -28621,22 +28369,24 @@ func rewriteValueARM64_OpSlicemask_0(v *Value) bool { } } func rewriteValueARM64_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (FSQRTD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FSQRTD) v.AddArg(x) return true } } func rewriteValueARM64_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpARM64CALLstatic) v.AuxInt = argwid v.Aux = target @@ -28645,14 +28395,17 @@ func rewriteValueARM64_OpStaticCall_0(v *Value) bool { } } func rewriteValueARM64_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 1 // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -28667,9 +28420,9 @@ func rewriteValueARM64_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -28684,9 +28437,9 @@ func rewriteValueARM64_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { break } @@ -28701,9 +28454,9 @@ func rewriteValueARM64_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { break } @@ -28718,9 +28471,9 @@ func rewriteValueARM64_OpStore_0(v *Value) bool { // result: (FMOVSstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -28735,9 +28488,9 @@ func rewriteValueARM64_OpStore_0(v *Value) bool { // result: (FMOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -28750,11 +28503,13 @@ func rewriteValueARM64_OpStore_0(v *Value) bool { return false } func rewriteValueARM64_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SUB) v.AddArg(x) v.AddArg(y) @@ -28762,11 +28517,13 @@ func rewriteValueARM64_OpSub16_0(v *Value) bool { } } func rewriteValueARM64_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) 
// result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SUB) v.AddArg(x) v.AddArg(y) @@ -28774,11 +28531,13 @@ func rewriteValueARM64_OpSub32_0(v *Value) bool { } } func rewriteValueARM64_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (FSUBS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64FSUBS) v.AddArg(x) v.AddArg(y) @@ -28786,11 +28545,13 @@ func rewriteValueARM64_OpSub32F_0(v *Value) bool { } } func rewriteValueARM64_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SUB) v.AddArg(x) v.AddArg(y) @@ -28798,11 +28559,13 @@ func rewriteValueARM64_OpSub64_0(v *Value) bool { } } func rewriteValueARM64_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (FSUBD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64FSUBD) v.AddArg(x) v.AddArg(y) @@ -28810,11 +28573,13 @@ func rewriteValueARM64_OpSub64F_0(v *Value) bool { } } func rewriteValueARM64_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SUB) v.AddArg(x) v.AddArg(y) @@ -28822,11 +28587,13 @@ func rewriteValueARM64_OpSub8_0(v *Value) bool { } } func rewriteValueARM64_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64SUB) v.AddArg(x) v.AddArg(y) @@ -28834,20 +28601,22 @@ func rewriteValueARM64_OpSubPtr_0(v *Value) bool { } } func rewriteValueARM64_OpTrunc_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc x) // result: (FRINTZD x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64FRINTZD) v.AddArg(x) return true } } func rewriteValueARM64_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -28855,10 +28624,11 @@ func rewriteValueARM64_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueARM64_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -28866,10 +28636,11 @@ func rewriteValueARM64_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueARM64_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -28877,10 +28648,11 @@ func rewriteValueARM64_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueARM64_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -28888,10 +28660,11 @@ func rewriteValueARM64_OpTrunc64to16_0(v *Value) bool { } } func rewriteValueARM64_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -28899,10 +28672,11 @@ func rewriteValueARM64_OpTrunc64to32_0(v *Value) bool { } } func rewriteValueARM64_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) 
v.Type = x.Type v.AddArg(x) @@ -28910,13 +28684,16 @@ func rewriteValueARM64_OpTrunc64to8_0(v *Value) bool { } } func rewriteValueARM64_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpARM64LoweredWB) v.Aux = fn v.AddArg(destptr) @@ -28926,11 +28703,13 @@ func rewriteValueARM64_OpWB_0(v *Value) bool { } } func rewriteValueARM64_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64XOR) v.AddArg(x) v.AddArg(y) @@ -28938,11 +28717,13 @@ func rewriteValueARM64_OpXor16_0(v *Value) bool { } } func rewriteValueARM64_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64XOR) v.AddArg(x) v.AddArg(y) @@ -28950,11 +28731,13 @@ func rewriteValueARM64_OpXor32_0(v *Value) bool { } } func rewriteValueARM64_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor64 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64XOR) v.AddArg(x) v.AddArg(y) @@ -28962,11 +28745,13 @@ func rewriteValueARM64_OpXor64_0(v *Value) bool { } } func rewriteValueARM64_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpARM64XOR) v.AddArg(x) v.AddArg(y) @@ -28974,6 +28759,8 @@ func rewriteValueARM64_OpXor8_0(v *Value) bool { } } func rewriteValueARM64_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [0] _ mem) @@ -28982,7 +28769,7 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -28994,8 +28781,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) @@ -29010,8 +28797,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVHstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) @@ -29026,8 +28813,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVWstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) @@ -29042,8 +28829,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVDstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) @@ -29058,8 +28845,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 2 v.AddArg(ptr) @@ -29081,8 +28868,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[1] - ptr := v.Args[0] + 
ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 4 v.AddArg(ptr) @@ -29104,8 +28891,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 4 v.AddArg(ptr) @@ -29127,8 +28914,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 6 v.AddArg(ptr) @@ -29157,8 +28944,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { if v.AuxInt != 9 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 8 v.AddArg(ptr) @@ -29177,6 +28964,8 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { return false } func rewriteValueARM64_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [10] ptr mem) @@ -29185,8 +28974,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 10 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 8 v.AddArg(ptr) @@ -29208,8 +28997,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 11 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 10 v.AddArg(ptr) @@ -29238,8 +29027,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 12 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVWstore) v.AuxInt = 8 v.AddArg(ptr) @@ -29261,8 +29050,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 13 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 12 v.AddArg(ptr) @@ -29291,8 +29080,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 14 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = 12 v.AddArg(ptr) @@ -29321,8 +29110,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 15 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = 14 v.AddArg(ptr) @@ -29358,8 +29147,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64STP) v.AuxInt = 0 v.AddArg(ptr) @@ -29378,8 +29167,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 32 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64STP) v.AuxInt = 16 v.AddArg(ptr) @@ -29408,8 +29197,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 48 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64STP) v.AuxInt = 32 v.AddArg(ptr) @@ -29448,8 +29237,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { if v.AuxInt != 64 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpARM64STP) v.AuxInt = 48 v.AddArg(ptr) @@ -29495,6 +29284,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { return false } func rewriteValueARM64_OpZero_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (Zero [s] ptr mem) @@ -29502,8 +29293,8 @@ func rewriteValueARM64_OpZero_20(v *Value) bool { // result: (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem)) for { s := v.AuxInt - 
mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s%16 != 0 && s%16 <= 8 && s > 16) { break } @@ -29525,8 +29316,8 @@ func rewriteValueARM64_OpZero_20(v *Value) bool { // result: (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem)) for { s := v.AuxInt - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s%16 != 0 && s%16 > 8 && s > 16) { break } @@ -29548,8 +29339,8 @@ func rewriteValueARM64_OpZero_20(v *Value) bool { // result: (DUFFZERO [4 * (64 - s/16)] ptr mem) for { s := v.AuxInt - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { break } @@ -29564,8 +29355,8 @@ func rewriteValueARM64_OpZero_20(v *Value) bool { // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem) for { s := v.AuxInt - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { break } @@ -29581,60 +29372,66 @@ func rewriteValueARM64_OpZero_20(v *Value) bool { return false } func rewriteValueARM64_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVHUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVHUreg) v.AddArg(x) return true } } func rewriteValueARM64_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to64 x) // result: (MOVHUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVHUreg) v.AddArg(x) return true } } func rewriteValueARM64_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt32to64 x) // result: (MOVWUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVWUreg) v.AddArg(x) return true } } func rewriteValueARM64_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVBUreg) v.AddArg(x) return true } } func rewriteValueARM64_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVBUreg) v.AddArg(x) return true } } func rewriteValueARM64_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to64 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpARM64MOVBUreg) v.AddArg(x) return true @@ -29680,9 +29477,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -29708,9 +29507,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -29808,9 +29609,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -29836,9 +29639,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { 
continue } @@ -30182,9 +29987,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30210,9 +30017,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30310,9 +30119,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30338,9 +30149,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30614,9 +30427,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30642,9 +30457,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30742,9 +30559,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -30770,9 +30589,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31156,9 +30977,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31184,9 +31007,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31284,9 +31109,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31312,9 +31139,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 
1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31562,9 +31391,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31590,9 +31421,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31690,9 +31523,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31718,9 +31553,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -31995,9 +31832,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -32023,9 +31862,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -32123,9 +31964,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -32151,9 +31994,11 @@ func rewriteBlockARM64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 912c4a1082..47adca632e 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -525,11 +525,13 @@ func rewriteValueMIPS(v *Value) bool { return false } func rewriteValueMIPS_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add16 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADD) v.AddArg(x) v.AddArg(y) @@ -537,11 +539,13 @@ func rewriteValueMIPS_OpAdd16_0(v *Value) bool { } } func rewriteValueMIPS_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADD) v.AddArg(x) 
v.AddArg(y) @@ -549,11 +553,13 @@ func rewriteValueMIPS_OpAdd32_0(v *Value) bool { } } func rewriteValueMIPS_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F x y) // result: (ADDF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADDF) v.AddArg(x) v.AddArg(y) @@ -561,14 +567,17 @@ func rewriteValueMIPS_OpAdd32F_0(v *Value) bool { } } func rewriteValueMIPS_OpAdd32withcarry_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Add32withcarry x y c) // result: (ADD c (ADD x y)) for { t := v.Type - c := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + c := v_2 v.reset(OpMIPSADD) v.AddArg(c) v0 := b.NewValue0(v.Pos, OpMIPSADD, t) @@ -579,11 +588,13 @@ func rewriteValueMIPS_OpAdd32withcarry_0(v *Value) bool { } } func rewriteValueMIPS_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F x y) // result: (ADDD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADDD) v.AddArg(x) v.AddArg(y) @@ -591,11 +602,13 @@ func rewriteValueMIPS_OpAdd64F_0(v *Value) bool { } } func rewriteValueMIPS_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add8 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADD) v.AddArg(x) v.AddArg(y) @@ -603,11 +616,13 @@ func rewriteValueMIPS_OpAdd8_0(v *Value) bool { } } func rewriteValueMIPS_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADD) v.AddArg(x) v.AddArg(y) @@ -615,11 +630,12 @@ func rewriteValueMIPS_OpAddPtr_0(v *Value) bool { } } func rewriteValueMIPS_OpAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (Addr {sym} base) // result: (MOVWaddr {sym} base) for { sym := v.Aux - base := v.Args[0] + base := v_0 v.reset(OpMIPSMOVWaddr) v.Aux = sym v.AddArg(base) @@ -627,11 +643,13 @@ func rewriteValueMIPS_OpAddr_0(v *Value) bool { } } func rewriteValueMIPS_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And16 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSAND) v.AddArg(x) v.AddArg(y) @@ -639,11 +657,13 @@ func rewriteValueMIPS_OpAnd16_0(v *Value) bool { } } func rewriteValueMIPS_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And32 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSAND) v.AddArg(x) v.AddArg(y) @@ -651,11 +671,13 @@ func rewriteValueMIPS_OpAnd32_0(v *Value) bool { } } func rewriteValueMIPS_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSAND) v.AddArg(x) v.AddArg(y) @@ -663,11 +685,13 @@ func rewriteValueMIPS_OpAnd8_0(v *Value) bool { } } func rewriteValueMIPS_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSAND) v.AddArg(x) v.AddArg(y) @@ -675,12 +699,15 @@ func rewriteValueMIPS_OpAndB_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicAdd32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicAdd32 ptr val mem) // result: (LoweredAtomicAdd ptr val mem) for { - mem := v.Args[2] - ptr := 
v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPSLoweredAtomicAdd) v.AddArg(ptr) v.AddArg(val) @@ -689,6 +716,9 @@ func rewriteValueMIPS_OpAtomicAdd32_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -696,9 +726,9 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool { // cond: !config.BigEndian // result: (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) (OR (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] ptr))) (NORconst [0] (SLL (MOVWconst [0xff]) (SLLconst [3] (ANDconst [3] ptr))))) mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(!config.BigEndian) { break } @@ -745,9 +775,9 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool { // cond: config.BigEndian // result: (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) (OR (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))) (NORconst [0] (SLL (MOVWconst [0xff]) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))))) mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(config.BigEndian) { break } @@ -799,13 +829,17 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool { return false } func rewriteValueMIPS_OpAtomicCompareAndSwap32_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap32 ptr old new_ mem) // result: (LoweredAtomicCas ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpMIPSLoweredAtomicCas) v.AddArg(ptr) v.AddArg(old) @@ -815,12 +849,15 @@ func rewriteValueMIPS_OpAtomicCompareAndSwap32_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicExchange32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange32 ptr val mem) // result: (LoweredAtomicExchange ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPSLoweredAtomicExchange) v.AddArg(ptr) v.AddArg(val) @@ -829,11 +866,13 @@ func rewriteValueMIPS_OpAtomicExchange32_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicLoad32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad32 ptr mem) // result: (LoweredAtomicLoad32 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSLoweredAtomicLoad32) v.AddArg(ptr) v.AddArg(mem) @@ -841,11 +880,13 @@ func rewriteValueMIPS_OpAtomicLoad32_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicLoad8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad8 ptr mem) // result: (LoweredAtomicLoad8 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSLoweredAtomicLoad8) v.AddArg(ptr) v.AddArg(mem) @@ -853,11 +894,13 @@ func rewriteValueMIPS_OpAtomicLoad8_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicLoadPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadPtr ptr mem) // result: (LoweredAtomicLoad32 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSLoweredAtomicLoad32) v.AddArg(ptr) v.AddArg(mem) @@ -865,6 +908,9 @@ func rewriteValueMIPS_OpAtomicLoadPtr_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicOr8_0(v *Value) 
bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -872,9 +918,9 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool { // cond: !config.BigEndian // result: (LoweredAtomicOr (AND (MOVWconst [^3]) ptr) (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] ptr))) mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(!config.BigEndian) { break } @@ -904,9 +950,9 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool { // cond: config.BigEndian // result: (LoweredAtomicOr (AND (MOVWconst [^3]) ptr) (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))) mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(config.BigEndian) { break } @@ -938,12 +984,15 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool { return false } func rewriteValueMIPS_OpAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore32 ptr val mem) // result: (LoweredAtomicStore32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPSLoweredAtomicStore32) v.AddArg(ptr) v.AddArg(val) @@ -952,12 +1001,15 @@ func rewriteValueMIPS_OpAtomicStore32_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicStore8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore8 ptr val mem) // result: (LoweredAtomicStore8 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPSLoweredAtomicStore8) v.AddArg(ptr) v.AddArg(val) @@ -966,12 +1018,15 @@ func rewriteValueMIPS_OpAtomicStore8_0(v *Value) bool { } } func rewriteValueMIPS_OpAtomicStorePtrNoWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStorePtrNoWB ptr val mem) // result: (LoweredAtomicStore32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPSLoweredAtomicStore32) v.AddArg(ptr) v.AddArg(val) @@ -980,13 +1035,15 @@ func rewriteValueMIPS_OpAtomicStorePtrNoWB_0(v *Value) bool { } } func rewriteValueMIPS_OpAvg32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Avg32u x y) // result: (ADD (SRLconst (SUB x y) [1]) y) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSADD) v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t) v0.AuxInt = 1 @@ -1000,13 +1057,14 @@ func rewriteValueMIPS_OpAvg32u_0(v *Value) bool { } } func rewriteValueMIPS_OpBitLen32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen32 x) // result: (SUB (MOVWconst [32]) (CLZ x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 32 @@ -1018,13 +1076,16 @@ func rewriteValueMIPS_OpBitLen32_0(v *Value) bool { } } func rewriteValueMIPS_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpMIPSCALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -1034,10 +1095,11 @@ func 
rewriteValueMIPS_OpClosureCall_0(v *Value) bool { } } func rewriteValueMIPS_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (NORconst [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNORconst) v.AuxInt = 0 v.AddArg(x) @@ -1045,10 +1107,11 @@ func rewriteValueMIPS_OpCom16_0(v *Value) bool { } } func rewriteValueMIPS_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (NORconst [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNORconst) v.AuxInt = 0 v.AddArg(x) @@ -1056,10 +1119,11 @@ func rewriteValueMIPS_OpCom32_0(v *Value) bool { } } func rewriteValueMIPS_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (NORconst [0] x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNORconst) v.AuxInt = 0 v.AddArg(x) @@ -1136,13 +1200,14 @@ func rewriteValueMIPS_OpConstNil_0(v *Value) bool { } } func rewriteValueMIPS_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz32 x) // result: (SUB (MOVWconst [32]) (CLZ (SUBconst [1] (AND x (NEG x))))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) v0.AuxInt = 32 @@ -1162,83 +1227,92 @@ func rewriteValueMIPS_OpCtz32_0(v *Value) bool { } } func rewriteValueMIPS_OpCtz32NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz32NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueMIPS_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (TRUNCFW x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSTRUNCFW) v.AddArg(x) return true } } func rewriteValueMIPS_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (MOVFD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVFD) v.AddArg(x) return true } } func rewriteValueMIPS_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (MOVWF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVWF) v.AddArg(x) return true } } func rewriteValueMIPS_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // result: (MOVWD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVWD) v.AddArg(x) return true } } func rewriteValueMIPS_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (TRUNCDW x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSTRUNCDW) v.AddArg(x) return true } } func rewriteValueMIPS_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (MOVDF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVDF) v.AddArg(x) return true } } func rewriteValueMIPS_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -1252,13 +1326,15 @@ func rewriteValueMIPS_OpDiv16_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, 
types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -1272,13 +1348,15 @@ func rewriteValueMIPS_OpDiv16u_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32 x y) // result: (Select1 (DIV x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v0.AddArg(x) @@ -1288,11 +1366,13 @@ func rewriteValueMIPS_OpDiv32_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (DIVF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSDIVF) v.AddArg(x) v.AddArg(y) @@ -1300,13 +1380,15 @@ func rewriteValueMIPS_OpDiv32F_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32u x y) // result: (Select1 (DIVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg(x) @@ -1316,11 +1398,13 @@ func rewriteValueMIPS_OpDiv32u_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (DIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSDIVD) v.AddArg(x) v.AddArg(y) @@ -1328,13 +1412,15 @@ func rewriteValueMIPS_OpDiv64F_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -1348,13 +1434,15 @@ func rewriteValueMIPS_OpDiv8_0(v *Value) bool { } } func rewriteValueMIPS_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -1368,13 +1456,15 @@ func rewriteValueMIPS_OpDiv8u_0(v *Value) bool { } } func rewriteValueMIPS_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) @@ -1389,13 +1479,15 @@ func rewriteValueMIPS_OpEq16_0(v *Value) bool { } } func rewriteValueMIPS_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x y) // result: (SGTUconst [1] (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) @@ -1406,12 +1498,14 @@ func rewriteValueMIPS_OpEq32_0(v *Value) bool { } } func rewriteValueMIPS_OpEq32F_0(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (FPFlagTrue (CMPEQF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) v0.AddArg(x) @@ -1421,12 +1515,14 @@ func rewriteValueMIPS_OpEq32F_0(v *Value) bool { } } func rewriteValueMIPS_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (FPFlagTrue (CMPEQD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) v0.AddArg(x) @@ -1436,13 +1532,15 @@ func rewriteValueMIPS_OpEq64F_0(v *Value) bool { } } func rewriteValueMIPS_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) @@ -1457,13 +1555,15 @@ func rewriteValueMIPS_OpEq8_0(v *Value) bool { } } func rewriteValueMIPS_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (XORconst [1] (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool) @@ -1474,13 +1574,15 @@ func rewriteValueMIPS_OpEqB_0(v *Value) bool { } } func rewriteValueMIPS_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqPtr x y) // result: (SGTUconst [1] (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTUconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) @@ -1491,13 +1593,15 @@ func rewriteValueMIPS_OpEqPtr_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) @@ -1512,13 +1616,15 @@ func rewriteValueMIPS_OpGeq16_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -1533,13 +1639,15 @@ func rewriteValueMIPS_OpGeq16U_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32 x y) // result: (XORconst [1] (SGT y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) @@ -1550,12 +1658,14 @@ func rewriteValueMIPS_OpGeq32_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (FPFlagTrue (CMPGEF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := 
b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) v0.AddArg(x) @@ -1565,13 +1675,15 @@ func rewriteValueMIPS_OpGeq32F_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32U x y) // result: (XORconst [1] (SGTU y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -1582,12 +1694,14 @@ func rewriteValueMIPS_OpGeq32U_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (FPFlagTrue (CMPGED x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) v0.AddArg(x) @@ -1597,13 +1711,15 @@ func rewriteValueMIPS_OpGeq64F_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) @@ -1618,13 +1734,15 @@ func rewriteValueMIPS_OpGeq8_0(v *Value) bool { } } func rewriteValueMIPS_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -1663,13 +1781,15 @@ func rewriteValueMIPS_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (SGT (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -1681,13 +1801,15 @@ func rewriteValueMIPS_OpGreater16_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -1699,11 +1821,13 @@ func rewriteValueMIPS_OpGreater16U_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32 x y) // result: (SGT x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGT) v.AddArg(x) v.AddArg(y) @@ -1711,12 +1835,14 @@ func rewriteValueMIPS_OpGreater32_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (FPFlagTrue (CMPGTF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) v0.AddArg(x) @@ -1726,11 +1852,13 @@ func rewriteValueMIPS_OpGreater32F_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: 
(Greater32U x y) // result: (SGTU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v.AddArg(x) v.AddArg(y) @@ -1738,12 +1866,14 @@ func rewriteValueMIPS_OpGreater32U_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (FPFlagTrue (CMPGTD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) v0.AddArg(x) @@ -1753,13 +1883,15 @@ func rewriteValueMIPS_OpGreater64F_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (SGT (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -1771,13 +1903,15 @@ func rewriteValueMIPS_OpGreater8_0(v *Value) bool { } } func rewriteValueMIPS_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -1789,13 +1923,15 @@ func rewriteValueMIPS_OpGreater8U_0(v *Value) bool { } } func rewriteValueMIPS_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32 x y) // result: (Select0 (MULT x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32)) v0.AddArg(x) @@ -1805,13 +1941,15 @@ func rewriteValueMIPS_OpHmul32_0(v *Value) bool { } } func rewriteValueMIPS_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32u x y) // result: (Select0 (MULTU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg(x) @@ -1821,12 +1959,14 @@ func rewriteValueMIPS_OpHmul32u_0(v *Value) bool { } } func rewriteValueMIPS_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpMIPSCALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -1835,11 +1975,13 @@ func rewriteValueMIPS_OpInterCall_0(v *Value) bool { } } func rewriteValueMIPS_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsInBounds idx len) // result: (SGTU len idx) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpMIPSSGTU) v.AddArg(len) v.AddArg(idx) @@ -1847,12 +1989,13 @@ func rewriteValueMIPS_OpIsInBounds_0(v *Value) bool { } } func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsNonNil ptr) // result: (SGTU ptr (MOVWconst [0])) for { - ptr := v.Args[0] + ptr := v_0 v.reset(OpMIPSSGTU) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) @@ -1862,13 +2005,15 @@ func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool { } } func 
rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsSliceInBounds idx len) // result: (XORconst [1] (SGTU idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -1879,13 +2024,15 @@ func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) @@ -1900,13 +2047,15 @@ func rewriteValueMIPS_OpLeq16_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -1921,13 +2070,15 @@ func rewriteValueMIPS_OpLeq16U_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32 x y) // result: (XORconst [1] (SGT x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) @@ -1938,12 +2089,14 @@ func rewriteValueMIPS_OpLeq32_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) // result: (FPFlagTrue (CMPGEF y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) v0.AddArg(y) @@ -1953,13 +2106,15 @@ func rewriteValueMIPS_OpLeq32F_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32U x y) // result: (XORconst [1] (SGTU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -1970,12 +2125,14 @@ func rewriteValueMIPS_OpLeq32U_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (FPFlagTrue (CMPGED y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) v0.AddArg(y) @@ -1985,13 +2142,15 @@ func rewriteValueMIPS_OpLeq64F_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) @@ -2006,13 +2165,15 @@ func rewriteValueMIPS_OpLeq8_0(v *Value) bool { } } func rewriteValueMIPS_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (XORconst 
[1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXORconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) @@ -2027,13 +2188,15 @@ func rewriteValueMIPS_OpLeq8U_0(v *Value) bool { } } func rewriteValueMIPS_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (SGT (SignExt16to32 y) (SignExt16to32 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(y) @@ -2045,13 +2208,15 @@ func rewriteValueMIPS_OpLess16_0(v *Value) bool { } } func rewriteValueMIPS_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(y) @@ -2063,11 +2228,13 @@ func rewriteValueMIPS_OpLess16U_0(v *Value) bool { } } func rewriteValueMIPS_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less32 x y) // result: (SGT y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGT) v.AddArg(y) v.AddArg(x) @@ -2075,12 +2242,14 @@ func rewriteValueMIPS_OpLess32_0(v *Value) bool { } } func rewriteValueMIPS_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (FPFlagTrue (CMPGTF y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) v0.AddArg(y) @@ -2090,11 +2259,13 @@ func rewriteValueMIPS_OpLess32F_0(v *Value) bool { } } func rewriteValueMIPS_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less32U x y) // result: (SGTU y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v.AddArg(y) v.AddArg(x) @@ -2102,12 +2273,14 @@ func rewriteValueMIPS_OpLess32U_0(v *Value) bool { } } func rewriteValueMIPS_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (FPFlagTrue (CMPGTD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) v0.AddArg(y) @@ -2117,13 +2290,15 @@ func rewriteValueMIPS_OpLess64F_0(v *Value) bool { } } func rewriteValueMIPS_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (SGT (SignExt8to32 y) (SignExt8to32 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGT) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(y) @@ -2135,13 +2310,15 @@ func rewriteValueMIPS_OpLess8_0(v *Value) bool { } } func rewriteValueMIPS_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) @@ -2153,13 +2330,15 @@ func rewriteValueMIPS_OpLess8U_0(v *Value) bool { } } func rewriteValueMIPS_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: 
t.IsBoolean() // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean()) { break } @@ -2173,8 +2352,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -2188,8 +2367,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && !isSigned(t)) { break } @@ -2203,8 +2382,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -2218,8 +2397,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVHUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -2233,8 +2412,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) || isPtr(t)) { break } @@ -2248,8 +2427,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVFload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -2263,8 +2442,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -2276,12 +2455,12 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool { return false } func rewriteValueMIPS_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVWaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpMIPSMOVWaddr) v.Aux = sym v.AddArg(base) @@ -2289,14 +2468,16 @@ func rewriteValueMIPS_OpLocalAddr_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // result: (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2317,14 +2498,16 @@ func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) // result: (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2341,13 +2524,13 @@ func rewriteValueMIPS_OpLsh16x32_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh16x64 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -2364,8 +2547,6 @@ func rewriteValueMIPS_OpLsh16x64_0(v *Value) bool { // cond: uint32(c) >= 16 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } 
@@ -2380,14 +2561,16 @@ func rewriteValueMIPS_OpLsh16x64_0(v *Value) bool { return false } func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // result: (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2408,14 +2591,16 @@ func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // result: (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2436,14 +2621,16 @@ func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) // result: (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2460,13 +2647,13 @@ func rewriteValueMIPS_OpLsh32x32_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh32x64 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -2483,8 +2670,6 @@ func rewriteValueMIPS_OpLsh32x64_0(v *Value) bool { // cond: uint32(c) >= 32 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -2499,14 +2684,16 @@ func rewriteValueMIPS_OpLsh32x64_0(v *Value) bool { return false } func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // result: (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2527,14 +2714,16 @@ func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // result: (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2555,14 +2744,16 @@ func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) // result: (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2579,13 +2770,13 @@ func rewriteValueMIPS_OpLsh8x32_0(v *Value) bool { } } func rewriteValueMIPS_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // 
match: (Lsh8x64 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -2602,8 +2793,6 @@ func rewriteValueMIPS_OpLsh8x64_0(v *Value) bool { // cond: uint32(c) >= 8 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -2618,14 +2807,16 @@ func rewriteValueMIPS_OpLsh8x64_0(v *Value) bool { return false } func rewriteValueMIPS_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // result: (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) v0.AddArg(x) @@ -2646,13 +2837,13 @@ func rewriteValueMIPS_OpLsh8x8_0(v *Value) bool { } } func rewriteValueMIPS_OpMIPSADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADD x (MOVWconst [c])) // result: (ADDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPSMOVWconst { continue } @@ -2667,10 +2858,8 @@ func rewriteValueMIPS_OpMIPSADD_0(v *Value) bool { // match: (ADD x (NEG y)) // result: (SUB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPSNEG { continue } @@ -2685,11 +2874,11 @@ func rewriteValueMIPS_OpMIPSADD_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSADDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) // result: (MOVWaddr [off1+off2] {sym} ptr) for { off1 := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } @@ -2708,7 +2897,7 @@ func rewriteValueMIPS_OpMIPSADDconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -2718,7 +2907,6 @@ func rewriteValueMIPS_OpMIPSADDconst_0(v *Value) bool { // result: (MOVWconst [int64(int32(c+d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -2731,7 +2919,6 @@ func rewriteValueMIPS_OpMIPSADDconst_0(v *Value) bool { // result: (ADDconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSADDconst { break } @@ -2746,7 +2933,6 @@ func rewriteValueMIPS_OpMIPSADDconst_0(v *Value) bool { // result: (ADDconst [int64(int32(c-d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSSUBconst { break } @@ -2760,14 +2946,14 @@ func rewriteValueMIPS_OpMIPSADDconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSAND_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (AND x (MOVWconst [c])) // result: (ANDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPSMOVWconst { continue } @@ -2782,8 +2968,8 @@ func rewriteValueMIPS_OpMIPSAND_0(v *Value) bool { // match: (AND x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -2794,14 +2980,11 @@ func rewriteValueMIPS_OpMIPSAND_0(v *Value) bool { // match: (AND (SGTUconst [1] x) (SGTUconst [1] y)) // 
result: (SGTUconst [1] (OR x y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSSGTUconst || v_0.AuxInt != 1 { continue } x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpMIPSSGTUconst || v_1.AuxInt != 1 { continue } @@ -2819,6 +3002,7 @@ func rewriteValueMIPS_OpMIPSAND_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSANDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDconst [0] _) // result: (MOVWconst [0]) for { @@ -2835,7 +3019,7 @@ func rewriteValueMIPS_OpMIPSANDconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -2845,7 +3029,6 @@ func rewriteValueMIPS_OpMIPSANDconst_0(v *Value) bool { // result: (MOVWconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -2858,7 +3041,6 @@ func rewriteValueMIPS_OpMIPSANDconst_0(v *Value) bool { // result: (ANDconst [c&d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -2872,12 +3054,13 @@ func rewriteValueMIPS_OpMIPSANDconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSCMOVZ_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMOVZ _ f (MOVWconst [0])) // result: f for { - _ = v.Args[2] - f := v.Args[1] - v_2 := v.Args[2] + f := v_1 if v_2.Op != OpMIPSMOVWconst || v_2.AuxInt != 0 { break } @@ -2890,9 +3073,7 @@ func rewriteValueMIPS_OpMIPSCMOVZ_0(v *Value) bool { // cond: c!=0 // result: a for { - _ = v.Args[2] - a := v.Args[0] - v_2 := v.Args[2] + a := v_0 if v_2.Op != OpMIPSMOVWconst { break } @@ -2908,12 +3089,11 @@ func rewriteValueMIPS_OpMIPSCMOVZ_0(v *Value) bool { // match: (CMOVZ a (MOVWconst [0]) c) // result: (CMOVZzero a c) for { - c := v.Args[2] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } + c := v_2 v.reset(OpMIPSCMOVZzero) v.AddArg(a) v.AddArg(c) @@ -2922,11 +3102,11 @@ func rewriteValueMIPS_OpMIPSCMOVZ_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSCMOVZzero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CMOVZzero _ (MOVWconst [0])) // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } @@ -2938,9 +3118,7 @@ func rewriteValueMIPS_OpMIPSCMOVZzero_0(v *Value) bool { // cond: c!=0 // result: a for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] + a := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -2956,17 +3134,19 @@ func rewriteValueMIPS_OpMIPSCMOVZzero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSLoweredAtomicAdd_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem) // cond: is16Bit(c) // result: (LoweredAtomicAddconst [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c)) { break } @@ -2979,15 +3159,17 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicAdd_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSLoweredAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LoweredAtomicStore32 ptr (MOVWconst [0]) mem) // result: (LoweredAtomicStorezero ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 
{ break } + mem := v_2 v.reset(OpMIPSLoweredAtomicStorezero) v.AddArg(ptr) v.AddArg(mem) @@ -2996,19 +3178,21 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicStore32_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVBUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVBUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3025,14 +3209,13 @@ func rewriteValueMIPS_OpMIPSMOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -3049,9 +3232,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVBstore { break } @@ -3070,11 +3251,12 @@ func rewriteValueMIPS_OpMIPSMOVBUload_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBUreg x:(MOVBUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUload { break } @@ -3086,7 +3268,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { // match: (MOVBUreg x:(MOVBUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUreg { break } @@ -3099,7 +3281,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { // result: @x.Block (MOVBUload [off] {sym} ptr mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBload { break } @@ -3123,7 +3305,6 @@ func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { // match: (MOVBUreg (ANDconst [c] x)) // result: (ANDconst [c&0xff] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -3137,7 +3318,6 @@ func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { // match: (MOVBUreg (MOVWconst [c])) // result: (MOVWconst [int64(uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -3149,19 +3329,21 @@ func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVBload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3178,14 +3360,13 @@ func rewriteValueMIPS_OpMIPSMOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -3202,9 +3383,7 @@ func rewriteValueMIPS_OpMIPSMOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVBstore { break } @@ -3223,11 +3402,12 @@ func rewriteValueMIPS_OpMIPSMOVBload_0(v *Value) bool { return false } func 
rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBreg x:(MOVBload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBload { break } @@ -3239,7 +3419,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { // match: (MOVBreg x:(MOVBreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBreg { break } @@ -3252,7 +3432,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { // result: @x.Block (MOVBload [off] {sym} ptr mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUload { break } @@ -3277,7 +3457,6 @@ func rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { // cond: c & 0x80 == 0 // result: (ANDconst [c&0x7f] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -3294,7 +3473,6 @@ func rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { // match: (MOVBreg (MOVWconst [c])) // result: (MOVWconst [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -3306,20 +3484,23 @@ func rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3337,15 +3518,14 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -3362,12 +3542,11 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPSMOVBstorezero) v.AuxInt = off v.Aux = sym @@ -3380,13 +3559,12 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVBreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym @@ -3400,13 +3578,12 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVBUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym @@ -3420,13 +3597,12 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym @@ -3440,13 +3616,12 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym @@ -3460,13 +3635,12 @@ 
func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = off v.Aux = sym @@ -3478,19 +3652,21 @@ func rewriteValueMIPS_OpMIPSMOVBstore_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVBstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVBstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3507,14 +3683,13 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -3528,19 +3703,21 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVDload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3557,14 +3734,13 @@ func rewriteValueMIPS_OpMIPSMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -3581,9 +3757,7 @@ func rewriteValueMIPS_OpMIPSMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVDstore { break } @@ -3603,20 +3777,23 @@ func rewriteValueMIPS_OpMIPSMOVDload_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3634,15 +3811,14 @@ func rewriteValueMIPS_OpMIPSMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -3657,19 +3833,21 @@ func rewriteValueMIPS_OpMIPSMOVDstore_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVFload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVFload [off1+off2] {sym} ptr mem) for { 
off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3686,14 +3864,13 @@ func rewriteValueMIPS_OpMIPSMOVFload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -3710,9 +3887,7 @@ func rewriteValueMIPS_OpMIPSMOVFload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVFstore { break } @@ -3732,20 +3907,23 @@ func rewriteValueMIPS_OpMIPSMOVFload_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVFstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVFstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3763,15 +3941,14 @@ func rewriteValueMIPS_OpMIPSMOVFstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -3786,19 +3963,21 @@ func rewriteValueMIPS_OpMIPSMOVFstore_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVHUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3815,14 +3994,13 @@ func rewriteValueMIPS_OpMIPSMOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -3839,9 +4017,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVHstore { break } @@ -3860,11 +4036,12 @@ func rewriteValueMIPS_OpMIPSMOVHUload_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVHUreg x:(MOVBUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUload { break } @@ -3876,7 +4053,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVHUload { break } @@ -3888,7 +4065,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVBUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUreg { break } @@ -3899,7 +4076,7 @@ func 
rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVHUreg { break } @@ -3912,7 +4089,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { // result: @x.Block (MOVHUload [off] {sym} ptr mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVHload { break } @@ -3936,7 +4113,6 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { // match: (MOVHUreg (ANDconst [c] x)) // result: (ANDconst [c&0xffff] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -3950,7 +4126,6 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { // match: (MOVHUreg (MOVWconst [c])) // result: (MOVWconst [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -3962,19 +4137,21 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVHload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -3991,14 +4168,13 @@ func rewriteValueMIPS_OpMIPSMOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -4015,9 +4191,7 @@ func rewriteValueMIPS_OpMIPSMOVHload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVHstore { break } @@ -4036,11 +4210,12 @@ func rewriteValueMIPS_OpMIPSMOVHload_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVHreg x:(MOVBload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBload { break } @@ -4052,7 +4227,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUload { break } @@ -4064,7 +4239,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHload _ _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVHload { break } @@ -4076,7 +4251,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBreg { break } @@ -4087,7 +4262,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVBUreg { break } @@ -4098,7 +4273,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHreg _)) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVHreg { break } @@ -4111,7 +4286,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // result: @x.Block (MOVHload [off] {sym} ptr mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpMIPSMOVHUload { break } @@ -4136,7 +4311,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // cond: c & 0x8000 
== 0 // result: (ANDconst [c&0x7fff] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -4153,7 +4327,6 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { // match: (MOVHreg (MOVWconst [c])) // result: (MOVWconst [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -4165,20 +4338,23 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVHstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -4196,15 +4372,14 @@ func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -4221,12 +4396,11 @@ func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPSMOVHstorezero) v.AuxInt = off v.Aux = sym @@ -4239,13 +4413,12 @@ func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym @@ -4259,13 +4432,12 @@ func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym @@ -4279,13 +4451,12 @@ func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVHstore) v.AuxInt = off v.Aux = sym @@ -4297,19 +4468,21 @@ func rewriteValueMIPS_OpMIPSMOVHstore_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVHstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -4326,14 +4499,13 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -4347,19 +4519,21 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVWload_0(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVWload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -4376,14 +4550,13 @@ func rewriteValueMIPS_OpMIPSMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -4400,9 +4573,7 @@ func rewriteValueMIPS_OpMIPSMOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWstore { break } @@ -4422,11 +4593,12 @@ func rewriteValueMIPS_OpMIPSMOVWload_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVWreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg x) // cond: x.Uses == 1 // result: (MOVWnop x) for { - x := v.Args[0] + x := v_0 if !(x.Uses == 1) { break } @@ -4437,7 +4609,6 @@ func rewriteValueMIPS_OpMIPSMOVWreg_0(v *Value) bool { // match: (MOVWreg (MOVWconst [c])) // result: (MOVWconst [c]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -4449,20 +4620,23 @@ func rewriteValueMIPS_OpMIPSMOVWreg_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1+off2) || x.Uses == 1) { break } @@ -4480,15 +4654,14 @@ func rewriteValueMIPS_OpMIPSMOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2)) { break } @@ -4505,12 +4678,11 @@ func rewriteValueMIPS_OpMIPSMOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPSMOVWstorezero) v.AuxInt = off v.Aux = sym @@ -4523,13 +4695,12 @@ func rewriteValueMIPS_OpMIPSMOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPSMOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPSMOVWstore) v.AuxInt = off v.Aux = sym @@ -4541,19 +4712,21 @@ func rewriteValueMIPS_OpMIPSMOVWstore_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVWstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVWstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - x := v.Args[0] + x := v_0 if x.Op != OpMIPSADDconst { break } off2 := x.AuxInt ptr := x.Args[0] + mem := v_1 if !(is16Bit(off1+off2) || x.Uses == 
1) { break } @@ -4570,14 +4743,13 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2)) { break } @@ -4591,12 +4763,12 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MUL (MOVWconst [0]) _ ) // result: (MOVWconst [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 0 { continue } @@ -4609,13 +4781,11 @@ func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { // match: (MUL (MOVWconst [1]) x ) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4626,13 +4796,11 @@ func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { // match: (MUL (MOVWconst [-1]) x ) // result: (NEG x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpMIPSNEG) v.AddArg(x) return true @@ -4643,14 +4811,12 @@ func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { // cond: isPowerOfTwo(int64(uint32(c))) // result: (SLLconst [log2(int64(uint32(c)))] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSMOVWconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if !(isPowerOfTwo(int64(uint32(c)))) { continue } @@ -4664,14 +4830,11 @@ func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { // match: (MUL (MOVWconst [c]) (MOVWconst [d])) // result: (MOVWconst [int64(int32(c)*int32(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSMOVWconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpMIPSMOVWconst { continue } @@ -4685,10 +4848,10 @@ func rewriteValueMIPS_OpMIPSMUL_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSNEG_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEG (MOVWconst [c])) // result: (MOVWconst [int64(int32(-c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -4700,13 +4863,13 @@ func rewriteValueMIPS_OpMIPSNEG_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSNOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NOR x (MOVWconst [c])) // result: (NORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPSMOVWconst { continue } @@ -4721,11 +4884,11 @@ func rewriteValueMIPS_OpMIPSNOR_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSNORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (NORconst [c] (MOVWconst [d])) // result: (MOVWconst [^(c|d)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -4737,14 +4900,14 @@ func 
rewriteValueMIPS_OpMIPSNORconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (OR x (MOVWconst [c])) // result: (ORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPSMOVWconst { continue } @@ -4759,8 +4922,8 @@ func rewriteValueMIPS_OpMIPSOR_0(v *Value) bool { // match: (OR x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -4771,14 +4934,11 @@ func rewriteValueMIPS_OpMIPSOR_0(v *Value) bool { // match: (OR (SGTUzero x) (SGTUzero y)) // result: (SGTUzero (OR x y)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMIPSSGTUzero { continue } x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpMIPSSGTUzero { continue } @@ -4795,13 +4955,14 @@ func rewriteValueMIPS_OpMIPSOR_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4821,7 +4982,6 @@ func rewriteValueMIPS_OpMIPSORconst_0(v *Value) bool { // result: (MOVWconst [c|d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -4834,7 +4994,6 @@ func rewriteValueMIPS_OpMIPSORconst_0(v *Value) bool { // result: (ORconst [c|d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSORconst { break } @@ -4848,15 +5007,16 @@ func rewriteValueMIPS_OpMIPSORconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGT_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SGT (MOVWconst [c]) x) // result: (SGTconst [c] x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpMIPSSGTconst) v.AuxInt = c v.AddArg(x) @@ -4865,9 +5025,7 @@ func rewriteValueMIPS_OpMIPSSGT_0(v *Value) bool { // match: (SGT x (MOVWconst [0])) // result: (SGTzero x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } @@ -4878,15 +5036,16 @@ func rewriteValueMIPS_OpMIPSSGT_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGTU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SGTU (MOVWconst [c]) x) // result: (SGTUconst [c] x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpMIPSSGTUconst) v.AuxInt = c v.AddArg(x) @@ -4895,9 +5054,7 @@ func rewriteValueMIPS_OpMIPSSGTU_0(v *Value) bool { // match: (SGTU x (MOVWconst [0])) // result: (SGTUzero x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 { break } @@ -4908,12 +5065,12 @@ func rewriteValueMIPS_OpMIPSSGTU_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SGTUconst [c] (MOVWconst [d])) // cond: uint32(c)>uint32(d) // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -4930,7 +5087,6 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { // result: (MOVWconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst 
{ break } @@ -4947,7 +5103,6 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVBUreg || !(0xff < uint32(c)) { break } @@ -4960,7 +5115,6 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVHUreg || !(0xffff < uint32(c)) { break } @@ -4973,7 +5127,6 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -4990,7 +5143,6 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSSRLconst { break } @@ -5005,11 +5157,11 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGTUzero_0(v *Value) bool { + v_0 := v.Args[0] // match: (SGTUzero (MOVWconst [d])) // cond: uint32(d) != 0 // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5025,7 +5177,6 @@ func rewriteValueMIPS_OpMIPSSGTUzero_0(v *Value) bool { // cond: uint32(d) == 0 // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5040,12 +5191,12 @@ func rewriteValueMIPS_OpMIPSSGTUzero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SGTconst [c] (MOVWconst [d])) // cond: int32(c) > int32(d) // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5062,7 +5213,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5079,7 +5229,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVBreg || !(0x7f < int32(c)) { break } @@ -5092,7 +5241,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVBreg || !(int32(c) <= -0x80) { break } @@ -5105,7 +5253,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVBUreg || !(0xff < int32(c)) { break } @@ -5118,7 +5265,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVBUreg || !(int32(c) < 0) { break } @@ -5131,7 +5277,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVHreg || !(0x7fff < int32(c)) { break } @@ -5144,7 +5289,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVHreg || !(int32(c) <= -0x8000) { break } @@ -5157,7 +5301,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVHUreg || !(0xffff < int32(c)) { break } @@ -5170,7 +5313,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { // result: (MOVWconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVHUreg || !(int32(c) < 0) { break } @@ -5181,12 +5323,12 @@ func rewriteValueMIPS_OpMIPSSGTconst_0(v *Value) bool { return 
false } func rewriteValueMIPS_OpMIPSSGTconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (SGTconst [c] (ANDconst [m] _)) // cond: 0 <= int32(m) && int32(m) < int32(c) // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSANDconst { break } @@ -5203,7 +5345,6 @@ func rewriteValueMIPS_OpMIPSSGTconst_10(v *Value) bool { // result: (MOVWconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSSRLconst { break } @@ -5218,11 +5359,11 @@ func rewriteValueMIPS_OpMIPSSGTconst_10(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGTzero_0(v *Value) bool { + v_0 := v.Args[0] // match: (SGTzero (MOVWconst [d])) // cond: int32(d) > 0 // result: (MOVWconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5238,7 +5379,6 @@ func rewriteValueMIPS_OpMIPSSGTzero_0(v *Value) bool { // cond: int32(d) <= 0 // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5253,12 +5393,12 @@ func rewriteValueMIPS_OpMIPSSGTzero_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SLL _ (MOVWconst [c])) // cond: uint32(c)>=32 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpMIPSMOVWconst { break } @@ -5273,9 +5413,7 @@ func rewriteValueMIPS_OpMIPSSLL_0(v *Value) bool { // match: (SLL x (MOVWconst [c])) // result: (SLLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -5288,11 +5426,11 @@ func rewriteValueMIPS_OpMIPSSLL_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSLLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SLLconst [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(uint32(d)<<uint32(c)))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5304,13 +5442,13 @@ func rewriteValueMIPS_OpMIPSSLLconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSRA_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SRA x (MOVWconst [c])) // cond: uint32(c)>=32 // result: (SRAconst x [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -5326,9 +5464,7 @@ func rewriteValueMIPS_OpMIPSSRA_0(v *Value) bool { // match: (SRA x (MOVWconst [c])) // result: (SRAconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -5341,11 +5477,11 @@ func rewriteValueMIPS_OpMIPSSRA_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSRAconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRAconst [c] (MOVWconst [d])) // result: (MOVWconst [int64(int32(d)>>uint32(c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5357,12 +5493,12 @@ func rewriteValueMIPS_OpMIPSSRAconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSRL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SRL _ (MOVWconst [c])) // cond: uint32(c)>=32 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpMIPSMOVWconst { break } @@ -5377,9 +5513,7 @@ func rewriteValueMIPS_OpMIPSSRL_0(v *Value) bool { // match: (SRL x (MOVWconst [c])) // result: (SRLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -5392,11 +5526,11 @@ func rewriteValueMIPS_OpMIPSSRL_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSRLconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRLconst [c] (MOVWconst [d])) // result: (MOVWconst [int64(uint32(d)>>uint32(c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5408,12 +5542,12 @@ func rewriteValueMIPS_OpMIPSSRLconst_0(v *Value) bool { return false }
func rewriteValueMIPS_OpMIPSSUB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUB x (MOVWconst [c])) // result: (SUBconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -5426,8 +5560,8 @@ func rewriteValueMIPS_OpMIPSSUB_0(v *Value) bool { // match: (SUB x x) // result: (MOVWconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpMIPSMOVWconst) @@ -5437,11 +5571,10 @@ func rewriteValueMIPS_OpMIPSSUB_0(v *Value) bool { // match: (SUB (MOVWconst [0]) x) // result: (NEG x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 0 { break } + x := v_1 v.reset(OpMIPSNEG) v.AddArg(x) return true @@ -5449,13 +5582,14 @@ func rewriteValueMIPS_OpMIPSSUB_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSUBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5465,7 +5599,6 @@ func rewriteValueMIPS_OpMIPSSUBconst_0(v *Value) bool { // result: (MOVWconst [int64(int32(d-c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5478,7 +5611,6 @@ func rewriteValueMIPS_OpMIPSSUBconst_0(v *Value) bool { // result: (ADDconst [int64(int32(-c-d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSSUBconst { break } @@ -5493,7 +5625,6 @@ func rewriteValueMIPS_OpMIPSSUBconst_0(v *Value) bool { // result: (ADDconst [int64(int32(-c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSADDconst { break } @@ -5507,13 +5638,13 @@ func rewriteValueMIPS_OpMIPSSUBconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSXOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XOR x (MOVWconst [c])) // result: (XORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPSMOVWconst { continue } @@ -5528,8 +5659,8 @@ func rewriteValueMIPS_OpMIPSXOR_0(v *Value) bool { // match: (XOR x x) // result: (MOVWconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpMIPSMOVWconst) @@ -5539,13 +5670,14 @@ func rewriteValueMIPS_OpMIPSXOR_0(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5557,7 +5689,7 @@ func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpMIPSNORconst) v.AuxInt = 0 v.AddArg(x) @@ -5567,7 +5699,6 @@ func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool { // result: (MOVWconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSMOVWconst { break } @@ -5580,7 +5711,6 @@ func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool { // result: (XORconst [c^d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPSXORconst { break } @@ -5594,13 +5724,15 @@ func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool { return false } func rewriteValueMIPS_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x 
:= v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -5614,13 +5746,15 @@ func rewriteValueMIPS_OpMod16_0(v *Value) bool { } } func rewriteValueMIPS_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -5634,13 +5768,15 @@ func rewriteValueMIPS_OpMod16u_0(v *Value) bool { } } func rewriteValueMIPS_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) // result: (Select0 (DIV x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v0.AddArg(x) @@ -5650,13 +5786,15 @@ func rewriteValueMIPS_OpMod32_0(v *Value) bool { } } func rewriteValueMIPS_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (Select0 (DIVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg(x) @@ -5666,13 +5804,15 @@ func rewriteValueMIPS_OpMod32u_0(v *Value) bool { } } func rewriteValueMIPS_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -5686,13 +5826,15 @@ func rewriteValueMIPS_OpMod8_0(v *Value) bool { } } func rewriteValueMIPS_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -5706,6 +5848,9 @@ func rewriteValueMIPS_OpMod8u_0(v *Value) bool { } } func rewriteValueMIPS_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -5714,7 +5859,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -5726,9 +5871,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPSMOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) @@ -5746,9 +5891,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -5767,9 +5912,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool 
{ if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 1 v.AddArg(dst) @@ -5796,9 +5941,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -5819,9 +5964,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -5849,9 +5994,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 3 v.AddArg(dst) @@ -5894,9 +6039,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPSMOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -5932,9 +6077,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -5964,9 +6109,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -6009,6 +6154,9 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool { return false } func rewriteValueMIPS_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -6020,9 +6168,9 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -6061,9 +6209,9 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -6102,9 +6250,9 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -6150,9 +6298,9 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) { break } @@ -6170,11 +6318,13 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool { return false } func rewriteValueMIPS_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSMUL) v.AddArg(x) v.AddArg(y) @@ -6182,11 +6332,13 @@ func rewriteValueMIPS_OpMul16_0(v *Value) bool { } } func rewriteValueMIPS_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSMUL) v.AddArg(x) v.AddArg(y) @@ -6194,11 +6346,13 @@ func rewriteValueMIPS_OpMul32_0(v *Value) bool { } } func 
rewriteValueMIPS_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (MULF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSMULF) v.AddArg(x) v.AddArg(y) @@ -6206,11 +6360,13 @@ func rewriteValueMIPS_OpMul32F_0(v *Value) bool { } } func rewriteValueMIPS_OpMul32uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32uhilo x y) // result: (MULTU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSMULTU) v.AddArg(x) v.AddArg(y) @@ -6218,11 +6374,13 @@ func rewriteValueMIPS_OpMul32uhilo_0(v *Value) bool { } } func rewriteValueMIPS_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (MULD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSMULD) v.AddArg(x) v.AddArg(y) @@ -6230,11 +6388,13 @@ func rewriteValueMIPS_OpMul64F_0(v *Value) bool { } } func rewriteValueMIPS_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSMUL) v.AddArg(x) v.AddArg(y) @@ -6242,63 +6402,70 @@ func rewriteValueMIPS_OpMul8_0(v *Value) bool { } } func rewriteValueMIPS_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNEG) v.AddArg(x) return true } } func rewriteValueMIPS_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNEG) v.AddArg(x) return true } } func rewriteValueMIPS_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (NEGF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNEGF) v.AddArg(x) return true } } func rewriteValueMIPS_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (NEGD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNEGD) v.AddArg(x) return true } } func rewriteValueMIPS_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNEG) v.AddArg(x) return true } } func rewriteValueMIPS_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -6315,13 +6482,15 @@ func rewriteValueMIPS_OpNeq16_0(v *Value) bool { } } func rewriteValueMIPS_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32 x y) // result: (SGTU (XOR x y) (MOVWconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v0.AddArg(x) @@ -6334,12 +6503,14 @@ func rewriteValueMIPS_OpNeq32_0(v *Value) bool { } } func rewriteValueMIPS_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32F x y) // result: (FPFlagFalse (CMPEQF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) v0.AddArg(x) @@ -6349,12 +6520,14 @@ func rewriteValueMIPS_OpNeq32F_0(v *Value) bool { } } func rewriteValueMIPS_OpNeq64F_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64F x y) // result: (FPFlagFalse (CMPEQD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSFPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) v0.AddArg(x) @@ -6364,13 +6537,15 @@ func rewriteValueMIPS_OpNeq64F_0(v *Value) bool { } } func rewriteValueMIPS_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -6387,11 +6562,13 @@ func rewriteValueMIPS_OpNeq8_0(v *Value) bool { } } func rewriteValueMIPS_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXOR) v.AddArg(x) v.AddArg(y) @@ -6399,13 +6576,15 @@ func rewriteValueMIPS_OpNeqB_0(v *Value) bool { } } func rewriteValueMIPS_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NeqPtr x y) // result: (SGTU (XOR x y) (MOVWconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSGTU) v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) v0.AddArg(x) @@ -6418,11 +6597,13 @@ func rewriteValueMIPS_OpNeqPtr_0(v *Value) bool { } } func rewriteValueMIPS_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSLoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -6430,10 +6611,11 @@ func rewriteValueMIPS_OpNilCheck_0(v *Value) bool { } } func rewriteValueMIPS_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSXORconst) v.AuxInt = 1 v.AddArg(x) @@ -6441,11 +6623,12 @@ func rewriteValueMIPS_OpNot_0(v *Value) bool { } } func rewriteValueMIPS_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr [off] ptr:(SP)) // result: (MOVWaddr [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if ptr.Op != OpSP { break } @@ -6458,7 +6641,7 @@ func rewriteValueMIPS_OpOffPtr_0(v *Value) bool { // result: (ADDconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpMIPSADDconst) v.AuxInt = off v.AddArg(ptr) @@ -6466,11 +6649,13 @@ func rewriteValueMIPS_OpOffPtr_0(v *Value) bool { } } func rewriteValueMIPS_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSOR) v.AddArg(x) v.AddArg(y) @@ -6478,11 +6663,13 @@ func rewriteValueMIPS_OpOr16_0(v *Value) bool { } } func rewriteValueMIPS_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSOR) v.AddArg(x) v.AddArg(y) @@ -6490,11 +6677,13 @@ func rewriteValueMIPS_OpOr32_0(v *Value) bool { } } func rewriteValueMIPS_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSOR) v.AddArg(x) v.AddArg(y) 
@@ -6502,11 +6691,13 @@ func rewriteValueMIPS_OpOr8_0(v *Value) bool { } } func rewriteValueMIPS_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSOR) v.AddArg(x) v.AddArg(y) @@ -6514,14 +6705,17 @@ func rewriteValueMIPS_OpOrB_0(v *Value) bool { } } func rewriteValueMIPS_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -6537,9 +6731,9 @@ func rewriteValueMIPS_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -6555,9 +6749,9 @@ func rewriteValueMIPS_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -6571,15 +6765,19 @@ func rewriteValueMIPS_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueMIPS_OpPanicExtend_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicExtend [kind] hi lo y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicExtendA [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 0) { break } @@ -6596,10 +6794,10 @@ func rewriteValueMIPS_OpPanicExtend_0(v *Value) bool { // result: (LoweredPanicExtendB [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 1) { break } @@ -6616,10 +6814,10 @@ func rewriteValueMIPS_OpPanicExtend_0(v *Value) bool { // result: (LoweredPanicExtendC [kind] hi lo y mem) for { kind := v.AuxInt - mem := v.Args[3] - hi := v.Args[0] - lo := v.Args[1] - y := v.Args[2] + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 if !(boundsABI(kind) == 2) { break } @@ -6634,15 +6832,15 @@ func rewriteValueMIPS_OpPanicExtend_0(v *Value) bool { return false } func rewriteValueMIPS_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (MOVWconst [c])) // result: (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -6665,15 +6863,15 @@ func rewriteValueMIPS_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueMIPS_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft32 x (MOVWconst [c])) // result: (Or32 (Lsh32x32 x (MOVWconst [c&31])) (Rsh32Ux32 x (MOVWconst [-c&31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -6696,15 +6894,15 @@ func rewriteValueMIPS_OpRotateLeft32_0(v *Value) bool { return false } func rewriteValueMIPS_OpRotateLeft64_0(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft64 x (MOVWconst [c])) // result: (Or64 (Lsh64x32 x (MOVWconst [c&63])) (Rsh64Ux32 x (MOVWconst [-c&63]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -6727,15 +6925,15 @@ func rewriteValueMIPS_OpRotateLeft64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVWconst [c])) // result: (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPSMOVWconst { break } @@ -6758,10 +6956,11 @@ func rewriteValueMIPS_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueMIPS_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -6769,10 +6968,11 @@ func rewriteValueMIPS_OpRound32F_0(v *Value) bool { } } func rewriteValueMIPS_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -6780,14 +6980,16 @@ func rewriteValueMIPS_OpRound64F_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // result: (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -6810,14 +7012,16 @@ func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // result: (CMOVZ (SRL (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -6836,15 +7040,15 @@ func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SRLconst (SLLconst x [16]) [c+16]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -6864,8 +7068,6 @@ func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool { // cond: uint32(c) >= 16 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -6880,14 +7082,16 @@ func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // result: (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, 
typ.UInt32) @@ -6910,13 +7114,15 @@ func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -6939,13 +7145,15 @@ func rewriteValueMIPS_OpRsh16x16_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // result: (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -6964,15 +7172,15 @@ func rewriteValueMIPS_OpRsh16x32_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SRAconst (SLLconst x [16]) [c+16]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -6992,9 +7200,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool { // cond: uint32(c) >= 16 // result: (SRAconst (SLLconst x [16]) [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7013,13 +7219,15 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -7042,14 +7250,16 @@ func rewriteValueMIPS_OpRsh16x8_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // result: (CMOVZ (SRL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v0.AddArg(x) @@ -7070,14 +7280,16 @@ func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // result: (CMOVZ (SRL x y) (MOVWconst [0]) (SGTUconst [32] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v0.AddArg(x) @@ -7094,13 +7306,13 @@ func rewriteValueMIPS_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh32Ux64 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SRLconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7117,8 +7329,6 @@ func rewriteValueMIPS_OpRsh32Ux64_0(v *Value) bool { // cond: uint32(c) >= 32 // result: 
(MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -7133,14 +7343,16 @@ func rewriteValueMIPS_OpRsh32Ux64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // result: (CMOVZ (SRL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v0.AddArg(x) @@ -7161,13 +7373,15 @@ func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // result: (SRA x ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) @@ -7188,13 +7402,15 @@ func rewriteValueMIPS_OpRsh32x16_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) // result: (SRA x ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) @@ -7211,13 +7427,13 @@ func rewriteValueMIPS_OpRsh32x32_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Rsh32x64 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SRAconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7234,9 +7450,7 @@ func rewriteValueMIPS_OpRsh32x64_0(v *Value) bool { // cond: uint32(c) >= 32 // result: (SRAconst x [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7252,13 +7466,15 @@ func rewriteValueMIPS_OpRsh32x64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // result: (SRA x ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) @@ -7279,14 +7495,16 @@ func rewriteValueMIPS_OpRsh32x8_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // result: (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -7309,14 +7527,16 @@ func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // result: (CMOVZ (SRL (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, 
OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -7335,15 +7555,15 @@ func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SRLconst (SLLconst x [24]) [c+24]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7363,8 +7583,6 @@ func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool { // cond: uint32(c) >= 8 // result: (MOVWconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -7379,14 +7597,16 @@ func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // result: (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -7409,13 +7629,15 @@ func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -7438,13 +7660,15 @@ func rewriteValueMIPS_OpRsh8x16_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // result: (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -7463,15 +7687,15 @@ func rewriteValueMIPS_OpRsh8x32_0(v *Value) bool { } } func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SRAconst (SLLconst x [24]) [c+24]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7491,9 +7715,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool { // cond: uint32(c) >= 8 // result: (SRAconst (SLLconst x [24]) [31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -7512,13 +7734,15 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool { return false } func rewriteValueMIPS_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSRA) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -7541,12 +7765,12 @@ func rewriteValueMIPS_OpRsh8x8_0(v *Value) bool { } } func rewriteValueMIPS_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select0 
(Add32carry x y)) // result: (ADD x y) for { - v_0 := v.Args[0] if v_0.Op != OpAdd32carry { break } @@ -7562,7 +7786,6 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (Sub32carry x y)) // result: (SUB x y) for { - v_0 := v.Args[0] if v_0.Op != OpSub32carry { break } @@ -7578,13 +7801,13 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (MULTU (MOVWconst [0]) _ )) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 { continue } @@ -7597,13 +7820,13 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (MULTU (MOVWconst [1]) _ )) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 { continue } @@ -7616,17 +7839,17 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (MULTU (MOVWconst [-1]) x )) // result: (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpMIPSCMOVZ) v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) v0.AuxInt = -1 @@ -7644,18 +7867,18 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // cond: isPowerOfTwo(int64(uint32(c))) // result: (SRLconst [32-log2(int64(uint32(c)))] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst { continue } c := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(isPowerOfTwo(int64(uint32(c)))) { continue } @@ -7669,18 +7892,17 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [(c*d)>>32]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst { continue } c := v_0_0.AuxInt - v_0_1 := v_0.Args[1^_i0] if v_0_1.Op != OpMIPSMOVWconst { continue } @@ -7694,7 +7916,6 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(c)%int32(d))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSDIV { break } @@ -7716,7 +7937,6 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { // match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(uint32(c)%uint32(d)))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSDIVU { break } @@ -7738,12 +7958,12 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool { return false } func 
rewriteValueMIPS_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select1 (Add32carry x y)) // result: (SGTU x (ADD x y)) for { - v_0 := v.Args[0] if v_0.Op != OpAdd32carry { break } @@ -7762,7 +7982,6 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (Sub32carry x y)) // result: (SGTU (SUB x y) x) for { - v_0 := v.Args[0] if v_0.Op != OpSub32carry { break } @@ -7781,13 +8000,13 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (MULTU (MOVWconst [0]) _ )) // result: (MOVWconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 { continue } @@ -7800,17 +8019,17 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (MULTU (MOVWconst [1]) x )) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -7821,17 +8040,17 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (MULTU (MOVWconst [-1]) x )) // result: (NEG x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpMIPSNEG) v.Type = x.Type v.AddArg(x) @@ -7843,18 +8062,18 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // cond: isPowerOfTwo(int64(uint32(c))) // result: (SLLconst [log2(int64(uint32(c)))] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst { continue } c := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(isPowerOfTwo(int64(uint32(c)))) { continue } @@ -7868,18 +8087,17 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSMULTU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPSMOVWconst { continue } c := v_0_0.AuxInt - v_0_1 := v_0.Args[1^_i0] if v_0_1.Op != OpMIPSMOVWconst { continue } @@ -7893,7 +8111,6 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(c)/int32(d))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPSDIV { break } @@ -7915,7 +8132,6 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { // match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) // result: (MOVWconst [int64(int32(uint32(c)/uint32(d)))]) for { - v_0 := 
v.Args[0] if v_0.Op != OpMIPSDIVU { break } @@ -7937,40 +8153,44 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool { return false } func rewriteValueMIPS_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVHreg) v.AddArg(x) return true } } func rewriteValueMIPS_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVBreg) v.AddArg(x) return true } } func rewriteValueMIPS_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVBreg) v.AddArg(x) return true } } func rewriteValueMIPS_OpSignmask_0(v *Value) bool { + v_0 := v.Args[0] // match: (Signmask x) // result: (SRAconst x [31]) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSSRAconst) v.AuxInt = 31 v.AddArg(x) @@ -7978,12 +8198,13 @@ func rewriteValueMIPS_OpSignmask_0(v *Value) bool { } } func rewriteValueMIPS_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SRAconst (NEG x) [31]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpMIPSSRAconst) v.AuxInt = 31 v0 := b.NewValue0(v.Pos, OpMIPSNEG, t) @@ -7993,22 +8214,24 @@ func rewriteValueMIPS_OpSlicemask_0(v *Value) bool { } } func rewriteValueMIPS_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (SQRTD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSSQRTD) v.AddArg(x) return true } } func rewriteValueMIPS_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpMIPSCALLstatic) v.AuxInt = argwid v.Aux = target @@ -8017,14 +8240,17 @@ func rewriteValueMIPS_OpStaticCall_0(v *Value) bool { } } func rewriteValueMIPS_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 1 // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -8039,9 +8265,9 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -8056,9 +8282,9 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { break } @@ -8073,9 +8299,9 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool { // result: (MOVFstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -8090,9 +8316,9 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -8105,11 +8331,13 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool { return false 
} func rewriteValueMIPS_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSUB) v.AddArg(x) v.AddArg(y) @@ -8117,11 +8345,13 @@ func rewriteValueMIPS_OpSub16_0(v *Value) bool { } } func rewriteValueMIPS_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSUB) v.AddArg(x) v.AddArg(y) @@ -8129,11 +8359,13 @@ func rewriteValueMIPS_OpSub32_0(v *Value) bool { } } func rewriteValueMIPS_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (SUBF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSUBF) v.AddArg(x) v.AddArg(y) @@ -8141,14 +8373,17 @@ func rewriteValueMIPS_OpSub32F_0(v *Value) bool { } } func rewriteValueMIPS_OpSub32withcarry_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Sub32withcarry x y c) // result: (SUB (SUB x y) c) for { t := v.Type - c := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + c := v_2 v.reset(OpMIPSSUB) v0 := b.NewValue0(v.Pos, OpMIPSSUB, t) v0.AddArg(x) @@ -8159,11 +8394,13 @@ func rewriteValueMIPS_OpSub32withcarry_0(v *Value) bool { } } func rewriteValueMIPS_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (SUBD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSUBD) v.AddArg(x) v.AddArg(y) @@ -8171,11 +8408,13 @@ func rewriteValueMIPS_OpSub64F_0(v *Value) bool { } } func rewriteValueMIPS_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSUB) v.AddArg(x) v.AddArg(y) @@ -8183,11 +8422,13 @@ func rewriteValueMIPS_OpSub8_0(v *Value) bool { } } func rewriteValueMIPS_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSSUB) v.AddArg(x) v.AddArg(y) @@ -8195,10 +8436,11 @@ func rewriteValueMIPS_OpSubPtr_0(v *Value) bool { } } func rewriteValueMIPS_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8206,10 +8448,11 @@ func rewriteValueMIPS_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueMIPS_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8217,10 +8460,11 @@ func rewriteValueMIPS_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueMIPS_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8228,13 +8472,16 @@ func rewriteValueMIPS_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueMIPS_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpMIPSLoweredWB) v.Aux = fn v.AddArg(destptr) @@ -8244,11 +8491,13 @@ func 
rewriteValueMIPS_OpWB_0(v *Value) bool { } } func rewriteValueMIPS_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXOR) v.AddArg(x) v.AddArg(y) @@ -8256,11 +8505,13 @@ func rewriteValueMIPS_OpXor16_0(v *Value) bool { } } func rewriteValueMIPS_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXOR) v.AddArg(x) v.AddArg(y) @@ -8268,11 +8519,13 @@ func rewriteValueMIPS_OpXor32_0(v *Value) bool { } } func rewriteValueMIPS_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPSXOR) v.AddArg(x) v.AddArg(y) @@ -8280,6 +8533,8 @@ func rewriteValueMIPS_OpXor8_0(v *Value) bool { } } func rewriteValueMIPS_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [0] _ mem) @@ -8288,7 +8543,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -8300,8 +8555,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSMOVBstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) @@ -8318,8 +8573,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -8337,8 +8592,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 1 v.AddArg(ptr) @@ -8363,8 +8618,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -8384,8 +8639,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -8411,8 +8666,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 3 v.AddArg(ptr) @@ -8449,8 +8704,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPSMOVBstore) v.AuxInt = 2 v.AddArg(ptr) @@ -8482,8 +8737,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -8518,8 +8773,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -8542,6 +8797,8 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool { return false } func rewriteValueMIPS_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -8553,8 +8810,8 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool { break } t := v.Aux - mem 
:= v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -8589,8 +8846,8 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -8630,8 +8887,8 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) { break } @@ -8648,42 +8905,46 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool { return false } func rewriteValueMIPS_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVHUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVHUreg) v.AddArg(x) return true } } func rewriteValueMIPS_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVBUreg) v.AddArg(x) return true } } func rewriteValueMIPS_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSMOVBUreg) v.AddArg(x) return true } } func rewriteValueMIPS_OpZeromask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zeromask x) // result: (NEG (SGTU x (MOVWconst [0]))) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPSNEG) v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) v0.AddArg(x) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 98d1e3bd25..7657eef13d 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -621,11 +621,13 @@ func rewriteValueMIPS64(v *Value) bool { return false } func rewriteValueMIPS64_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add16 x y) // result: (ADDV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDV) v.AddArg(x) v.AddArg(y) @@ -633,11 +635,13 @@ func rewriteValueMIPS64_OpAdd16_0(v *Value) bool { } } func rewriteValueMIPS64_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32 x y) // result: (ADDV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDV) v.AddArg(x) v.AddArg(y) @@ -645,11 +649,13 @@ func rewriteValueMIPS64_OpAdd32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F x y) // result: (ADDF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDF) v.AddArg(x) v.AddArg(y) @@ -657,11 +663,13 @@ func rewriteValueMIPS64_OpAdd32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpAdd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64 x y) // result: (ADDV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDV) v.AddArg(x) v.AddArg(y) @@ -669,11 +677,13 @@ func rewriteValueMIPS64_OpAdd64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F x y) // result: (ADDD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDD) v.AddArg(x) v.AddArg(y) @@ -681,11 +691,13 @@ func rewriteValueMIPS64_OpAdd64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] // match: (Add8 x y) // result: (ADDV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDV) v.AddArg(x) v.AddArg(y) @@ -693,11 +705,13 @@ func rewriteValueMIPS64_OpAdd8_0(v *Value) bool { } } func rewriteValueMIPS64_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x y) // result: (ADDV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDV) v.AddArg(x) v.AddArg(y) @@ -705,11 +719,12 @@ func rewriteValueMIPS64_OpAddPtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (Addr {sym} base) // result: (MOVVaddr {sym} base) for { sym := v.Aux - base := v.Args[0] + base := v_0 v.reset(OpMIPS64MOVVaddr) v.Aux = sym v.AddArg(base) @@ -717,11 +732,13 @@ func rewriteValueMIPS64_OpAddr_0(v *Value) bool { } } func rewriteValueMIPS64_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And16 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v.AddArg(x) v.AddArg(y) @@ -729,11 +746,13 @@ func rewriteValueMIPS64_OpAnd16_0(v *Value) bool { } } func rewriteValueMIPS64_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And32 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v.AddArg(x) v.AddArg(y) @@ -741,11 +760,13 @@ func rewriteValueMIPS64_OpAnd32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And64 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v.AddArg(x) v.AddArg(y) @@ -753,11 +774,13 @@ func rewriteValueMIPS64_OpAnd64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v.AddArg(x) v.AddArg(y) @@ -765,11 +788,13 @@ func rewriteValueMIPS64_OpAnd8_0(v *Value) bool { } } func rewriteValueMIPS64_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v.AddArg(x) v.AddArg(y) @@ -777,12 +802,15 @@ func rewriteValueMIPS64_OpAndB_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicAdd32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicAdd32 ptr val mem) // result: (LoweredAtomicAdd32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicAdd32) v.AddArg(ptr) v.AddArg(val) @@ -791,12 +819,15 @@ func rewriteValueMIPS64_OpAtomicAdd32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicAdd64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicAdd64 ptr val mem) // result: (LoweredAtomicAdd64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicAdd64) v.AddArg(ptr) v.AddArg(val) @@ -805,13 +836,17 @@ func rewriteValueMIPS64_OpAtomicAdd64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicCompareAndSwap32_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap32 ptr old new_ mem) // result: 
(LoweredAtomicCas32 ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpMIPS64LoweredAtomicCas32) v.AddArg(ptr) v.AddArg(old) @@ -821,13 +856,17 @@ func rewriteValueMIPS64_OpAtomicCompareAndSwap32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicCompareAndSwap64_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap64 ptr old new_ mem) // result: (LoweredAtomicCas64 ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpMIPS64LoweredAtomicCas64) v.AddArg(ptr) v.AddArg(old) @@ -837,12 +876,15 @@ func rewriteValueMIPS64_OpAtomicCompareAndSwap64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicExchange32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange32 ptr val mem) // result: (LoweredAtomicExchange32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicExchange32) v.AddArg(ptr) v.AddArg(val) @@ -851,12 +893,15 @@ func rewriteValueMIPS64_OpAtomicExchange32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicExchange64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange64 ptr val mem) // result: (LoweredAtomicExchange64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicExchange64) v.AddArg(ptr) v.AddArg(val) @@ -865,11 +910,13 @@ func rewriteValueMIPS64_OpAtomicExchange64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicLoad32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad32 ptr mem) // result: (LoweredAtomicLoad32 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64LoweredAtomicLoad32) v.AddArg(ptr) v.AddArg(mem) @@ -877,11 +924,13 @@ func rewriteValueMIPS64_OpAtomicLoad32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicLoad64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad64 ptr mem) // result: (LoweredAtomicLoad64 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64LoweredAtomicLoad64) v.AddArg(ptr) v.AddArg(mem) @@ -889,11 +938,13 @@ func rewriteValueMIPS64_OpAtomicLoad64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicLoad8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad8 ptr mem) // result: (LoweredAtomicLoad8 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64LoweredAtomicLoad8) v.AddArg(ptr) v.AddArg(mem) @@ -901,11 +952,13 @@ func rewriteValueMIPS64_OpAtomicLoad8_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicLoadPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadPtr ptr mem) // result: (LoweredAtomicLoad64 ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64LoweredAtomicLoad64) v.AddArg(ptr) v.AddArg(mem) @@ -913,12 +966,15 @@ func rewriteValueMIPS64_OpAtomicLoadPtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore32 ptr val mem) // result: (LoweredAtomicStore32 ptr 
val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicStore32) v.AddArg(ptr) v.AddArg(val) @@ -927,12 +983,15 @@ func rewriteValueMIPS64_OpAtomicStore32_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicStore64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore64 ptr val mem) // result: (LoweredAtomicStore64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicStore64) v.AddArg(ptr) v.AddArg(val) @@ -941,12 +1000,15 @@ func rewriteValueMIPS64_OpAtomicStore64_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicStore8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore8 ptr val mem) // result: (LoweredAtomicStore8 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicStore8) v.AddArg(ptr) v.AddArg(val) @@ -955,12 +1017,15 @@ func rewriteValueMIPS64_OpAtomicStore8_0(v *Value) bool { } } func rewriteValueMIPS64_OpAtomicStorePtrNoWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStorePtrNoWB ptr val mem) // result: (LoweredAtomicStore64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpMIPS64LoweredAtomicStore64) v.AddArg(ptr) v.AddArg(val) @@ -969,13 +1034,15 @@ func rewriteValueMIPS64_OpAtomicStorePtrNoWB_0(v *Value) bool { } } func rewriteValueMIPS64_OpAvg64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Avg64u x y) // result: (ADDV (SRLVconst (SUBV x y) [1]) y) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64ADDV) v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t) v0.AuxInt = 1 @@ -989,13 +1056,16 @@ func rewriteValueMIPS64_OpAvg64u_0(v *Value) bool { } } func rewriteValueMIPS64_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpMIPS64CALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -1005,12 +1075,13 @@ func rewriteValueMIPS64_OpClosureCall_0(v *Value) bool { } } func rewriteValueMIPS64_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com16 x) // result: (NOR (MOVVconst [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 @@ -1020,12 +1091,13 @@ func rewriteValueMIPS64_OpCom16_0(v *Value) bool { } } func rewriteValueMIPS64_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com32 x) // result: (NOR (MOVVconst [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 @@ -1035,12 +1107,13 @@ func rewriteValueMIPS64_OpCom32_0(v *Value) bool { } } func rewriteValueMIPS64_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com64 x) // result: (NOR (MOVVconst [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, 
typ.UInt64) v0.AuxInt = 0 @@ -1050,12 +1123,13 @@ func rewriteValueMIPS64_OpCom64_0(v *Value) bool { } } func rewriteValueMIPS64_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com8 x) // result: (NOR (MOVVconst [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 0 @@ -1144,113 +1218,125 @@ func rewriteValueMIPS64_OpConstNil_0(v *Value) bool { } } func rewriteValueMIPS64_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (TRUNCFW x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64TRUNCFW) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt32Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64 x) // result: (TRUNCFV x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64TRUNCFV) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (MOVFD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVFD) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (MOVWF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVWF) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // result: (MOVWD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVWD) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (TRUNCDW x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64TRUNCDW) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (MOVDF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVDF) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt64Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64 x) // result: (TRUNCDV x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64TRUNCDV) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt64to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to32F x) // result: (MOVVF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVVF) v.AddArg(x) return true } } func rewriteValueMIPS64_OpCvt64to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to64F x) // result: (MOVVD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVVD) v.AddArg(x) return true } } func rewriteValueMIPS64_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -1264,13 +1350,15 @@ func rewriteValueMIPS64_OpDiv16_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -1284,13 +1372,15 @@ func 
rewriteValueMIPS64_OpDiv16u_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32 x y) // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -1304,11 +1394,13 @@ func rewriteValueMIPS64_OpDiv32_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (DIVF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64DIVF) v.AddArg(x) v.AddArg(y) @@ -1316,13 +1408,15 @@ func rewriteValueMIPS64_OpDiv32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32u x y) // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -1336,13 +1430,15 @@ func rewriteValueMIPS64_OpDiv32u_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div64 x y) // result: (Select1 (DIVV x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v0.AddArg(x) @@ -1352,11 +1448,13 @@ func rewriteValueMIPS64_OpDiv64_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (DIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64DIVD) v.AddArg(x) v.AddArg(y) @@ -1364,13 +1462,15 @@ func rewriteValueMIPS64_OpDiv64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div64u x y) // result: (Select1 (DIVVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -1380,13 +1480,15 @@ func rewriteValueMIPS64_OpDiv64u_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -1400,13 +1502,15 @@ func rewriteValueMIPS64_OpDiv8_0(v *Value) bool { } } func rewriteValueMIPS64_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -1420,13 +1524,15 @@ func 
rewriteValueMIPS64_OpDiv8u_0(v *Value) bool { } } func rewriteValueMIPS64_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1443,13 +1549,15 @@ func rewriteValueMIPS64_OpEq16_0(v *Value) bool { } } func rewriteValueMIPS64_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x y) // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1466,12 +1574,14 @@ func rewriteValueMIPS64_OpEq32_0(v *Value) bool { } } func rewriteValueMIPS64_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (FPFlagTrue (CMPEQF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) v0.AddArg(x) @@ -1481,13 +1591,15 @@ func rewriteValueMIPS64_OpEq32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq64 x y) // result: (SGTU (MOVVconst [1]) (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1500,12 +1612,14 @@ func rewriteValueMIPS64_OpEq64_0(v *Value) bool { } } func rewriteValueMIPS64_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (FPFlagTrue (CMPEQD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) v0.AddArg(x) @@ -1515,13 +1629,15 @@ func rewriteValueMIPS64_OpEq64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1538,13 +1654,15 @@ func rewriteValueMIPS64_OpEq8_0(v *Value) bool { } } func rewriteValueMIPS64_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (XOR (MOVVconst [1]) (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1557,13 +1675,15 @@ func rewriteValueMIPS64_OpEqB_0(v *Value) bool { } } func rewriteValueMIPS64_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqPtr x y) // result: (SGTU (MOVVconst [1]) (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1576,13 +1696,15 @@ func rewriteValueMIPS64_OpEqPtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq16_0(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 y) (SignExt16to64 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1599,13 +1721,15 @@ func rewriteValueMIPS64_OpGeq16_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1622,13 +1746,15 @@ func rewriteValueMIPS64_OpGeq16U_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32 x y) // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 y) (SignExt32to64 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1645,12 +1771,14 @@ func rewriteValueMIPS64_OpGeq32_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (FPFlagTrue (CMPGEF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) v0.AddArg(x) @@ -1660,13 +1788,15 @@ func rewriteValueMIPS64_OpGeq32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32U x y) // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1683,13 +1813,15 @@ func rewriteValueMIPS64_OpGeq32U_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64 x y) // result: (XOR (MOVVconst [1]) (SGT y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1702,12 +1834,14 @@ func rewriteValueMIPS64_OpGeq64_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (FPFlagTrue (CMPGED x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) v0.AddArg(x) @@ -1717,13 +1851,15 @@ func rewriteValueMIPS64_OpGeq64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64U x y) // result: (XOR (MOVVconst [1]) (SGTU y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1736,13 +1872,15 @@ func rewriteValueMIPS64_OpGeq64U_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block 
typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 y) (SignExt8to64 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1759,13 +1897,15 @@ func rewriteValueMIPS64_OpGeq8_0(v *Value) bool { } } func rewriteValueMIPS64_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -1806,13 +1946,15 @@ func rewriteValueMIPS64_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (SGT (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -1824,13 +1966,15 @@ func rewriteValueMIPS64_OpGreater16_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1842,13 +1986,15 @@ func rewriteValueMIPS64_OpGreater16U_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32 x y) // result: (SGT (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1860,12 +2006,14 @@ func rewriteValueMIPS64_OpGreater32_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (FPFlagTrue (CMPGTF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) v0.AddArg(x) @@ -1875,13 +2023,15 @@ func rewriteValueMIPS64_OpGreater32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32U x y) // result: (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1893,11 +2043,13 @@ func rewriteValueMIPS64_OpGreater32U_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64 x y) // result: (SGT x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v.AddArg(x) v.AddArg(y) @@ -1905,12 +2057,14 @@ func rewriteValueMIPS64_OpGreater64_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (FPFlagTrue (CMPGTD x y)) for { - y := v.Args[1] - x 
:= v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) v0.AddArg(x) @@ -1920,11 +2074,13 @@ func rewriteValueMIPS64_OpGreater64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64U x y) // result: (SGTU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v.AddArg(x) v.AddArg(y) @@ -1932,13 +2088,15 @@ func rewriteValueMIPS64_OpGreater64U_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (SGT (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -1950,13 +2108,15 @@ func rewriteValueMIPS64_OpGreater8_0(v *Value) bool { } } func rewriteValueMIPS64_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -1968,13 +2128,15 @@ func rewriteValueMIPS64_OpGreater8U_0(v *Value) bool { } } func rewriteValueMIPS64_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32 x y) // result: (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAVconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64) @@ -1991,13 +2153,15 @@ func rewriteValueMIPS64_OpHmul32_0(v *Value) bool { } } func rewriteValueMIPS64_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32u x y) // result: (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRLVconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) @@ -2014,13 +2178,15 @@ func rewriteValueMIPS64_OpHmul32u_0(v *Value) bool { } } func rewriteValueMIPS64_OpHmul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul64 x y) // result: (Select0 (MULV x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) v0.AddArg(x) @@ -2030,13 +2196,15 @@ func rewriteValueMIPS64_OpHmul64_0(v *Value) bool { } } func rewriteValueMIPS64_OpHmul64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul64u x y) // result: (Select0 (MULVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -2046,12 +2214,14 @@ func rewriteValueMIPS64_OpHmul64u_0(v *Value) bool { } } func rewriteValueMIPS64_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpMIPS64CALLinter) 
v.AuxInt = argwid v.AddArg(entry) @@ -2060,11 +2230,13 @@ func rewriteValueMIPS64_OpInterCall_0(v *Value) bool { } } func rewriteValueMIPS64_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsInBounds idx len) // result: (SGTU len idx) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpMIPS64SGTU) v.AddArg(len) v.AddArg(idx) @@ -2072,12 +2244,13 @@ func rewriteValueMIPS64_OpIsInBounds_0(v *Value) bool { } } func rewriteValueMIPS64_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsNonNil ptr) // result: (SGTU ptr (MOVVconst [0])) for { - ptr := v.Args[0] + ptr := v_0 v.reset(OpMIPS64SGTU) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) @@ -2087,13 +2260,15 @@ func rewriteValueMIPS64_OpIsNonNil_0(v *Value) bool { } } func rewriteValueMIPS64_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsSliceInBounds idx len) // result: (XOR (MOVVconst [1]) (SGTU idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2106,13 +2281,15 @@ func rewriteValueMIPS64_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2129,13 +2306,15 @@ func rewriteValueMIPS64_OpLeq16_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2152,13 +2331,15 @@ func rewriteValueMIPS64_OpLeq16U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32 x y) // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2175,12 +2356,14 @@ func rewriteValueMIPS64_OpLeq32_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) // result: (FPFlagTrue (CMPGEF y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) v0.AddArg(y) @@ -2190,13 +2373,15 @@ func rewriteValueMIPS64_OpLeq32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32U x y) // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2213,13 +2398,15 @@ func 
rewriteValueMIPS64_OpLeq32U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64 x y) // result: (XOR (MOVVconst [1]) (SGT x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2232,12 +2419,14 @@ func rewriteValueMIPS64_OpLeq64_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (FPFlagTrue (CMPGED y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) v0.AddArg(y) @@ -2247,13 +2436,15 @@ func rewriteValueMIPS64_OpLeq64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64U x y) // result: (XOR (MOVVconst [1]) (SGTU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2266,13 +2457,15 @@ func rewriteValueMIPS64_OpLeq64U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2289,13 +2482,15 @@ func rewriteValueMIPS64_OpLeq8_0(v *Value) bool { } } func rewriteValueMIPS64_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v0.AuxInt = 1 @@ -2312,13 +2507,15 @@ func rewriteValueMIPS64_OpLeq8U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (SGT (SignExt16to64 y) (SignExt16to64 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(y) @@ -2330,13 +2527,15 @@ func rewriteValueMIPS64_OpLess16_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(y) @@ -2348,13 +2547,15 @@ func rewriteValueMIPS64_OpLess16U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32 x y) // result: (SGT (SignExt32to64 y) (SignExt32to64 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(y) @@ -2366,12 +2567,14 @@ func rewriteValueMIPS64_OpLess32_0(v *Value) bool { } } func 
rewriteValueMIPS64_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (FPFlagTrue (CMPGTF y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) v0.AddArg(y) @@ -2381,13 +2584,15 @@ func rewriteValueMIPS64_OpLess32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32U x y) // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(y) @@ -2399,11 +2604,13 @@ func rewriteValueMIPS64_OpLess32U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64 x y) // result: (SGT y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v.AddArg(y) v.AddArg(x) @@ -2411,12 +2618,14 @@ func rewriteValueMIPS64_OpLess64_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (FPFlagTrue (CMPGTD y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagTrue) v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) v0.AddArg(y) @@ -2426,11 +2635,13 @@ func rewriteValueMIPS64_OpLess64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64U x y) // result: (SGTU y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v.AddArg(y) v.AddArg(x) @@ -2438,13 +2649,15 @@ func rewriteValueMIPS64_OpLess64U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (SGT (SignExt8to64 y) (SignExt8to64 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(y) @@ -2456,13 +2669,15 @@ func rewriteValueMIPS64_OpLess8_0(v *Value) bool { } } func rewriteValueMIPS64_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(y) @@ -2474,13 +2689,15 @@ func rewriteValueMIPS64_OpLess8U_0(v *Value) bool { } } func rewriteValueMIPS64_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: t.IsBoolean() // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean()) { break } @@ -2494,8 +2711,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -2509,8 +2726,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && !isSigned(t)) { break } @@ -2524,8 +2741,8 @@ func 
rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -2539,8 +2756,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVHUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -2554,8 +2771,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && isSigned(t)) { break } @@ -2569,8 +2786,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVWUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && !isSigned(t)) { break } @@ -2584,8 +2801,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVVload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) || isPtr(t)) { break } @@ -2599,8 +2816,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVFload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -2614,8 +2831,8 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -2627,12 +2844,12 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool { return false } func rewriteValueMIPS64_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVVaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpMIPS64MOVVaddr) v.Aux = sym v.AddArg(base) @@ -2640,14 +2857,16 @@ func rewriteValueMIPS64_OpLocalAddr_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2669,14 +2888,16 @@ func rewriteValueMIPS64_OpLsh16x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2698,14 +2919,16 @@ func rewriteValueMIPS64_OpLsh16x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2723,14 +2946,16 @@ func rewriteValueMIPS64_OpLsh16x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh16x8_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2752,14 +2977,16 @@ func rewriteValueMIPS64_OpLsh16x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2781,14 +3008,16 @@ func rewriteValueMIPS64_OpLsh32x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2810,14 +3039,16 @@ func rewriteValueMIPS64_OpLsh32x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2835,14 +3066,16 @@ func rewriteValueMIPS64_OpLsh32x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2864,14 +3097,16 @@ func rewriteValueMIPS64_OpLsh32x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2893,14 +3128,16 @@ func rewriteValueMIPS64_OpLsh64x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2922,14 +3159,16 @@ func rewriteValueMIPS64_OpLsh64x32_0(v *Value) bool { } } 
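The prologue added at the top of each rewriteValue function above binds v.Args back to front, v_1 before v_0, so the first, highest-index load carries the only bounds check and every rule body below reuses the locals instead of re-indexing v.Args. A minimal sketch of that pattern, using a stripped-down stand-in type rather than the real ssa.Value:

type val struct {
	Op   string
	Args []*val
}

// bindArgs mirrors the new prologue for a two-argument op: loading Args[1]
// first proves len(Args) >= 2, so the Args[0] load below needs no further
// bounds check, and the rules in the function body reuse these locals.
func bindArgs(v *val) (v_0, v_1 *val) {
	v_1 = v.Args[1]
	v_0 = v.Args[0]
	return v_0, v_1
}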
func rewriteValueMIPS64_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2947,14 +3186,16 @@ func rewriteValueMIPS64_OpLsh64x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -2976,14 +3217,16 @@ func rewriteValueMIPS64_OpLsh64x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -3005,14 +3248,16 @@ func rewriteValueMIPS64_OpLsh8x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -3034,14 +3279,16 @@ func rewriteValueMIPS64_OpLsh8x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -3059,14 +3306,16 @@ func rewriteValueMIPS64_OpLsh8x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -3088,14 +3337,14 @@ func rewriteValueMIPS64_OpLsh8x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpMIPS64ADDV_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDV x (MOVVconst [c])) // cond: is32Bit(c) // result: (ADDVconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPS64MOVVconst { continue } @@ -3113,10 +3362,8 @@ func rewriteValueMIPS64_OpMIPS64ADDV_0(v *Value) bool { // match: (ADDV x (NEGV y)) // result: (SUBV x y) for { 
- _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPS64NEGV { continue } @@ -3131,11 +3378,11 @@ func rewriteValueMIPS64_OpMIPS64ADDV_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64ADDVconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) // result: (MOVVaddr [off1+off2] {sym} ptr) for { off1 := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } @@ -3154,7 +3401,7 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -3164,7 +3411,6 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst_0(v *Value) bool { // result: (MOVVconst [c+d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -3178,7 +3424,6 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst_0(v *Value) bool { // result: (ADDVconst [c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } @@ -3197,7 +3442,6 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst_0(v *Value) bool { // result: (ADDVconst [c-d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64SUBVconst { break } @@ -3214,14 +3458,14 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64AND_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AND x (MOVVconst [c])) // cond: is32Bit(c) // result: (ANDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPS64MOVVconst { continue } @@ -3239,8 +3483,8 @@ func rewriteValueMIPS64_OpMIPS64AND_0(v *Value) bool { // match: (AND x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -3251,6 +3495,7 @@ func rewriteValueMIPS64_OpMIPS64AND_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDconst [0] _) // result: (MOVVconst [0]) for { @@ -3267,7 +3512,7 @@ func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -3277,7 +3522,6 @@ func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool { // result: (MOVVconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -3290,7 +3534,6 @@ func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool { // result: (ANDconst [c&d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64ANDconst { break } @@ -3304,17 +3547,19 @@ func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) // cond: is32Bit(c) // result: (LoweredAtomicAddconst32 [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(c)) { break } @@ -3327,17 +3572,19 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + 
v_0 := v.Args[0] // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) // cond: is32Bit(c) // result: (LoweredAtomicAddconst64 [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst { break } c := v_1.AuxInt + mem := v_2 if !(is32Bit(c)) { break } @@ -3350,15 +3597,17 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) // result: (LoweredAtomicStorezero32 ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPS64LoweredAtomicStorezero32) v.AddArg(ptr) v.AddArg(mem) @@ -3367,15 +3616,17 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) // result: (LoweredAtomicStorezero64 ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPS64LoweredAtomicStorezero64) v.AddArg(ptr) v.AddArg(mem) @@ -3384,19 +3635,20 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3413,14 +3665,13 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3434,10 +3685,11 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBUreg x:(MOVBUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUload { break } @@ -3449,7 +3701,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg_0(v *Value) bool { // match: (MOVBUreg x:(MOVBUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUreg { break } @@ -3460,7 +3712,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg_0(v *Value) bool { // match: (MOVBUreg (MOVVconst [c])) // result: (MOVVconst [int64(uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -3472,19 +3723,20 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := 
v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3501,14 +3753,13 @@ func rewriteValueMIPS64_OpMIPS64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3522,10 +3773,11 @@ func rewriteValueMIPS64_OpMIPS64MOVBload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBreg x:(MOVBload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBload { break } @@ -3537,7 +3789,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg_0(v *Value) bool { // match: (MOVBreg x:(MOVBreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBreg { break } @@ -3548,7 +3800,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg_0(v *Value) bool { // match: (MOVBreg (MOVVconst [c])) // result: (MOVVconst [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -3560,20 +3811,22 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3591,15 +3844,14 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3616,12 +3868,11 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPS64MOVBstorezero) v.AuxInt = off v.Aux = sym @@ -3634,13 +3885,12 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVBreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym @@ -3654,13 +3904,12 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVBUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym @@ -3674,13 +3923,12 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym @@ -3694,13 +3942,12 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != 
OpMIPS64MOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym @@ -3714,13 +3961,12 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym @@ -3734,13 +3980,12 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVWUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = off v.Aux = sym @@ -3752,19 +3997,20 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3781,14 +4027,13 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3802,19 +4047,20 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVDload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3831,14 +4077,13 @@ func rewriteValueMIPS64_OpMIPS64MOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3852,20 +4097,22 @@ func rewriteValueMIPS64_OpMIPS64MOVDload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3883,15 +4130,14 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3906,19 +4152,20 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore_0(v *Value) bool { return false } 
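The commuted matches in the ADDV, AND, NOR and OR hunks no longer index v.Args with _i0 and 1^_i0; the loop's post statement swaps the two pre-bound locals instead, so both argument orders are tried while the body stays a straight-line match on v_0 and v_1. A sketch of just that control flow, with plain ints standing in for *Value and a placeholder match callback:

// tryBothOrders swaps v_0 and v_1 between iterations, the same shape as the
// generated commute loop. After two swaps the locals are back in their
// original order for any rules that follow the loop.
func tryBothOrders(a0, a1 int, match func(x, y int) bool) bool {
	v_0, v_1 := a0, a1
	for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
		if !match(v_0, v_1) {
			continue // corresponds to the generated "continue" on a failed sub-match
		}
		return true
	}
	return false
}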
func rewriteValueMIPS64_OpMIPS64MOVFload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVFload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3935,14 +4182,13 @@ func rewriteValueMIPS64_OpMIPS64MOVFload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -3956,20 +4202,22 @@ func rewriteValueMIPS64_OpMIPS64MOVFload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVFstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVFstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3987,15 +4235,14 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4010,19 +4257,20 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVHUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4039,14 +4287,13 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4060,10 +4307,11 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHUreg x:(MOVBUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUload { break } @@ -4075,7 +4323,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHUload { break } @@ -4087,7 +4335,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVBUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUreg { break } @@ -4098,7 +4346,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg x:(MOVHUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHUreg { break } 
@@ -4109,7 +4357,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg_0(v *Value) bool { // match: (MOVHUreg (MOVVconst [c])) // result: (MOVVconst [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -4121,19 +4368,20 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVHload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4150,14 +4398,13 @@ func rewriteValueMIPS64_OpMIPS64MOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4171,10 +4418,11 @@ func rewriteValueMIPS64_OpMIPS64MOVHload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHreg x:(MOVBload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBload { break } @@ -4186,7 +4434,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUload { break } @@ -4198,7 +4446,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHload { break } @@ -4210,7 +4458,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBreg { break } @@ -4221,7 +4469,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVBUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUreg { break } @@ -4232,7 +4480,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { // match: (MOVHreg x:(MOVHreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHreg { break } @@ -4243,7 +4491,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { // match: (MOVHreg (MOVVconst [c])) // result: (MOVVconst [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -4255,20 +4502,22 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVHstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -4286,15 +4535,14 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + 
val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4311,12 +4559,11 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPS64MOVHstorezero) v.AuxInt = off v.Aux = sym @@ -4329,13 +4576,12 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym @@ -4349,13 +4595,12 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVHUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym @@ -4369,13 +4614,12 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym @@ -4389,13 +4633,12 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVWUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVHstore) v.AuxInt = off v.Aux = sym @@ -4407,19 +4650,20 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVHstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4436,14 +4680,13 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4457,19 +4700,20 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVVload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVVload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4486,14 +4730,13 @@ func rewriteValueMIPS64_OpMIPS64MOVVload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4507,11 +4750,12 @@ func rewriteValueMIPS64_OpMIPS64MOVVload_0(v *Value) bool { return false } func 
rewriteValueMIPS64_OpMIPS64MOVVreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVVreg x) // cond: x.Uses == 1 // result: (MOVVnop x) for { - x := v.Args[0] + x := v_0 if !(x.Uses == 1) { break } @@ -4522,7 +4766,6 @@ func rewriteValueMIPS64_OpMIPS64MOVVreg_0(v *Value) bool { // match: (MOVVreg (MOVVconst [c])) // result: (MOVVconst [c]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -4534,20 +4777,22 @@ func rewriteValueMIPS64_OpMIPS64MOVVreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVVstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVVstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -4565,15 +4810,14 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4590,12 +4834,11 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPS64MOVVstorezero) v.AuxInt = off v.Aux = sym @@ -4606,19 +4849,20 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVVstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVVstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4635,14 +4879,13 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4656,19 +4899,20 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVWUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVWUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4685,14 +4929,13 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4706,10 +4949,11 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload_0(v *Value) bool { return false } 
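In the load/store hunks above, bindings such as mem := v_2 and x := v_1.Args[0] move below the Op and AuxInt tests, so a rule that fails its cheap structural checks breaks out before binding the remaining arguments. A sketch of that ordering, with a hypothetical node type and foldStoreZero helper standing in for the generated code:

type node struct {
	op  string
	aux int64
}

// foldStoreZero mirrors the reordered match in the MOVxstore hunks: the
// Op/AuxInt checks run first, and the memory argument is bound only after
// the match is known to succeed.
func foldStoreZero(v_0, v_1, v_2 *node) (ptr, mem *node, ok bool) {
	ptr = v_0
	if v_1.op != "MOVVconst" || v_1.aux != 0 {
		return nil, nil, false // fail fast: mem is never touched
	}
	mem = v_2 // bound only once the structural checks have passed
	return ptr, mem, true
}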
func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWUreg x:(MOVBUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUload { break } @@ -4721,7 +4965,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVHUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHUload { break } @@ -4733,7 +4977,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVWUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVWUload { break } @@ -4745,7 +4989,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVBUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUreg { break } @@ -4756,7 +5000,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVHUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHUreg { break } @@ -4767,7 +5011,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg x:(MOVWUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVWUreg { break } @@ -4778,7 +5022,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { // match: (MOVWUreg (MOVVconst [c])) // result: (MOVVconst [int64(uint32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -4790,19 +5033,20 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVWload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -4819,14 +5063,13 @@ func rewriteValueMIPS64_OpMIPS64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -4840,10 +5083,11 @@ func rewriteValueMIPS64_OpMIPS64MOVWload_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg x:(MOVBload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBload { break } @@ -4855,7 +5099,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVBUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUload { break } @@ -4867,7 +5111,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHload { break } @@ -4879,7 +5123,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHUload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHUload { break } @@ -4891,7 +5135,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVWload _ _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVWload { break } @@ -4903,7 
+5147,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVBreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBreg { break } @@ -4914,7 +5158,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVBUreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVBUreg { break } @@ -4925,7 +5169,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVHreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVHreg { break } @@ -4936,7 +5180,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg x:(MOVWreg _)) // result: (MOVVreg x) for { - x := v.Args[0] + x := v_0 if x.Op != OpMIPS64MOVWreg { break } @@ -4947,7 +5191,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { // match: (MOVWreg (MOVVconst [c])) // result: (MOVVconst [int64(int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -4959,20 +5202,22 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -4990,15 +5235,14 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -5015,12 +5259,11 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpMIPS64MOVWstorezero) v.AuxInt = off v.Aux = sym @@ -5033,13 +5276,12 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVWstore) v.AuxInt = off v.Aux = sym @@ -5053,13 +5295,12 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpMIPS64MOVWUreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpMIPS64MOVWstore) v.AuxInt = off v.Aux = sym @@ -5071,19 +5312,20 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVWstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVWstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -5100,14 +5342,13 @@ func 
rewriteValueMIPS64_OpMIPS64MOVWstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } @@ -5121,10 +5362,10 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64NEGV_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGV (MOVVconst [c])) // result: (MOVVconst [-c]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5136,14 +5377,14 @@ func rewriteValueMIPS64_OpMIPS64NEGV_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64NOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NOR x (MOVVconst [c])) // cond: is32Bit(c) // result: (NORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPS64MOVVconst { continue } @@ -5161,11 +5402,11 @@ func rewriteValueMIPS64_OpMIPS64NOR_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64NORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (NORconst [c] (MOVVconst [d])) // result: (MOVVconst [^(c|d)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5177,14 +5418,14 @@ func rewriteValueMIPS64_OpMIPS64NORconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64OR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OR x (MOVVconst [c])) // cond: is32Bit(c) // result: (ORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPS64MOVVconst { continue } @@ -5202,8 +5443,8 @@ func rewriteValueMIPS64_OpMIPS64OR_0(v *Value) bool { // match: (OR x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -5214,13 +5455,14 @@ func rewriteValueMIPS64_OpMIPS64OR_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64ORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5240,7 +5482,6 @@ func rewriteValueMIPS64_OpMIPS64ORconst_0(v *Value) bool { // result: (MOVVconst [c|d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5254,7 +5495,6 @@ func rewriteValueMIPS64_OpMIPS64ORconst_0(v *Value) bool { // result: (ORconst [c|d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64ORconst { break } @@ -5271,16 +5511,17 @@ func rewriteValueMIPS64_OpMIPS64ORconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SGT_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SGT (MOVVconst [c]) x) // cond: is32Bit(c) // result: (SGTconst [c] x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } c := v_0.AuxInt + x := v_1 if !(is32Bit(c)) { break } @@ -5292,16 +5533,17 @@ func rewriteValueMIPS64_OpMIPS64SGT_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SGTU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SGTU (MOVVconst [c]) x) // cond: is32Bit(c) // result: (SGTUconst [c] x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst 
{ break } c := v_0.AuxInt + x := v_1 if !(is32Bit(c)) { break } @@ -5313,12 +5555,12 @@ func rewriteValueMIPS64_OpMIPS64SGTU_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SGTUconst [c] (MOVVconst [d])) // cond: uint64(c)>uint64(d) // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5335,7 +5577,6 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5352,7 +5593,6 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) { break } @@ -5365,7 +5605,6 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) { break } @@ -5378,7 +5617,6 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64ANDconst { break } @@ -5395,7 +5633,6 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64SRLVconst { break } @@ -5410,12 +5647,12 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SGTconst [c] (MOVVconst [d])) // cond: c>d // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5432,7 +5669,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5449,7 +5685,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) { break } @@ -5462,7 +5697,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) { break } @@ -5475,7 +5709,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) { break } @@ -5488,7 +5721,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) { break } @@ -5501,7 +5733,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) { break } @@ -5514,7 +5745,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) { break } @@ -5527,7 +5757,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) { break } @@ -5540,7 +5769,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVHUreg || !(c < 0) 
{ break } @@ -5551,12 +5779,12 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SGTconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (SGTconst [c] (MOVWUreg _)) // cond: c < 0 // result: (MOVVconst [0]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) { break } @@ -5569,7 +5797,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_10(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64ANDconst { break } @@ -5586,7 +5813,6 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_10(v *Value) bool { // result: (MOVVconst [1]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64SRLVconst { break } @@ -5601,12 +5827,12 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_10(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SLLV_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SLLV _ (MOVVconst [c])) // cond: uint64(c)>=64 // result: (MOVVconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpMIPS64MOVVconst { break } @@ -5621,9 +5847,7 @@ func rewriteValueMIPS64_OpMIPS64SLLV_0(v *Value) bool { // match: (SLLV x (MOVVconst [c])) // result: (SLLVconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -5636,11 +5860,11 @@ func rewriteValueMIPS64_OpMIPS64SLLV_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SLLVconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SLLVconst [c] (MOVVconst [d])) // result: (MOVVconst [d<<uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5652,13 +5876,13 @@ func rewriteValueMIPS64_OpMIPS64SLLVconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SRAV_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SRAV x (MOVVconst [c])) // cond: uint64(c)>=64 // result: (SRAVconst x [63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -5674,9 +5898,7 @@ func rewriteValueMIPS64_OpMIPS64SRAV_0(v *Value) bool { // match: (SRAV x (MOVVconst [c])) // result: (SRAVconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -5689,11 +5911,11 @@ func rewriteValueMIPS64_OpMIPS64SRAV_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SRAVconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRAVconst [c] (MOVVconst [d])) // result: (MOVVconst [d>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5705,12 +5927,12 @@ func rewriteValueMIPS64_OpMIPS64SRAVconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SRLV_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SRLV _ (MOVVconst [c])) // cond: uint64(c)>=64 // result: (MOVVconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpMIPS64MOVVconst { break } @@ -5725,9 +5947,7 @@ func rewriteValueMIPS64_OpMIPS64SRLV_0(v *Value) bool { // match: (SRLV x (MOVVconst [c])) // result: (SRLVconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -5740,11 +5960,11 @@ func rewriteValueMIPS64_OpMIPS64SRLV_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SRLVconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRLVconst [c] (MOVVconst [d])) // result: (MOVVconst [int64(uint64(d)>>uint64(c))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5756,13 +5976,13 @@ func rewriteValueMIPS64_OpMIPS64SRLVconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SUBV_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBV x (MOVVconst [c])) // cond: is32Bit(c) // result:
(SUBVconst [c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -5778,8 +5998,8 @@ func rewriteValueMIPS64_OpMIPS64SUBV_0(v *Value) bool { // match: (SUBV x x) // result: (MOVVconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpMIPS64MOVVconst) @@ -5789,11 +6009,10 @@ func rewriteValueMIPS64_OpMIPS64SUBV_0(v *Value) bool { // match: (SUBV (MOVVconst [0]) x) // result: (NEGV x) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst || v_0.AuxInt != 0 { break } + x := v_1 v.reset(OpMIPS64NEGV) v.AddArg(x) return true @@ -5801,13 +6020,14 @@ func rewriteValueMIPS64_OpMIPS64SUBV_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SUBVconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBVconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5817,7 +6037,6 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst_0(v *Value) bool { // result: (MOVVconst [d-c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5831,7 +6050,6 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst_0(v *Value) bool { // result: (ADDVconst [-c-d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64SUBVconst { break } @@ -5850,7 +6068,6 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst_0(v *Value) bool { // result: (ADDVconst [-c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64ADDVconst { break } @@ -5867,14 +6084,14 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64XOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XOR x (MOVVconst [c])) // cond: is32Bit(c) // result: (XORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMIPS64MOVVconst { continue } @@ -5892,8 +6109,8 @@ func rewriteValueMIPS64_OpMIPS64XOR_0(v *Value) bool { // match: (XOR x x) // result: (MOVVconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpMIPS64MOVVconst) @@ -5903,13 +6120,14 @@ func rewriteValueMIPS64_OpMIPS64XOR_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64XORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5921,7 +6139,7 @@ func rewriteValueMIPS64_OpMIPS64XORconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NORconst) v.AuxInt = 0 v.AddArg(x) @@ -5931,7 +6149,6 @@ func rewriteValueMIPS64_OpMIPS64XORconst_0(v *Value) bool { // result: (MOVVconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64MOVVconst { break } @@ -5945,7 +6162,6 @@ func rewriteValueMIPS64_OpMIPS64XORconst_0(v *Value) bool { // result: (XORconst [c^d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpMIPS64XORconst { break } @@ -5962,13 +6178,15 @@ func rewriteValueMIPS64_OpMIPS64XORconst_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 
v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -5982,13 +6200,15 @@ func rewriteValueMIPS64_OpMod16_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -6002,13 +6222,15 @@ func rewriteValueMIPS64_OpMod16u_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -6022,13 +6244,15 @@ func rewriteValueMIPS64_OpMod32_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -6042,13 +6266,15 @@ func rewriteValueMIPS64_OpMod32u_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod64 x y) // result: (Select0 (DIVV x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v0.AddArg(x) @@ -6058,13 +6284,15 @@ func rewriteValueMIPS64_OpMod64_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod64u x y) // result: (Select0 (DIVVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -6074,13 +6302,15 @@ func rewriteValueMIPS64_OpMod64u_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -6094,13 +6324,15 @@ func rewriteValueMIPS64_OpMod8_0(v *Value) bool { } } func rewriteValueMIPS64_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect0) v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, 
typ.UInt64) @@ -6114,6 +6346,9 @@ func rewriteValueMIPS64_OpMod8u_0(v *Value) bool { } } func rewriteValueMIPS64_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -6122,7 +6357,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -6134,9 +6369,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) @@ -6154,9 +6389,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -6175,9 +6410,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 1 v.AddArg(dst) @@ -6204,9 +6439,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -6227,9 +6462,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -6257,9 +6492,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 3 v.AddArg(dst) @@ -6304,9 +6539,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%8 == 0) { break } @@ -6327,9 +6562,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -6359,9 +6594,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -6404,6 +6639,9 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool { return false } func rewriteValueMIPS64_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -6413,9 +6651,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpMIPS64MOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -6451,9 +6689,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -6492,9 +6730,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := 
v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -6533,9 +6771,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%8 == 0) { break } @@ -6565,9 +6803,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%8 == 0) { break } @@ -6604,9 +6842,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice) { break } @@ -6623,9 +6861,9 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 24 || t.(*types.Type).Alignment()%8 != 0) { break } @@ -6643,13 +6881,15 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool { return false } func rewriteValueMIPS64_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul16 x y) // result: (Select1 (MULVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -6659,13 +6899,15 @@ func rewriteValueMIPS64_OpMul16_0(v *Value) bool { } } func rewriteValueMIPS64_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul32 x y) // result: (Select1 (MULVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -6675,11 +6917,13 @@ func rewriteValueMIPS64_OpMul32_0(v *Value) bool { } } func rewriteValueMIPS64_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (MULF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64MULF) v.AddArg(x) v.AddArg(y) @@ -6687,13 +6931,15 @@ func rewriteValueMIPS64_OpMul32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul64 x y) // result: (Select1 (MULVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -6703,11 +6949,13 @@ func rewriteValueMIPS64_OpMul64_0(v *Value) bool { } } func rewriteValueMIPS64_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (MULD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64MULD) v.AddArg(x) v.AddArg(y) @@ -6715,11 +6963,13 @@ func rewriteValueMIPS64_OpMul64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpMul64uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64uhilo x y) // result: (MULVU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64MULVU) v.AddArg(x) v.AddArg(y) @@ -6727,13 +6977,15 @@ func rewriteValueMIPS64_OpMul64uhilo_0(v *Value) bool { } } func 
rewriteValueMIPS64_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul8 x y) // result: (Select1 (MULVU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpSelect1) v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg(x) @@ -6743,73 +6995,81 @@ func rewriteValueMIPS64_OpMul8_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (NEGV x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NEGV) v.AddArg(x) return true } } func rewriteValueMIPS64_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (NEGV x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NEGV) v.AddArg(x) return true } } func rewriteValueMIPS64_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (NEGF x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NEGF) v.AddArg(x) return true } } func rewriteValueMIPS64_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64 x) // result: (NEGV x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NEGV) v.AddArg(x) return true } } func rewriteValueMIPS64_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (NEGD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NEGD) v.AddArg(x) return true } } func rewriteValueMIPS64_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (NEGV x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64NEGV) v.AddArg(x) return true } } func rewriteValueMIPS64_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -6826,13 +7086,15 @@ func rewriteValueMIPS64_OpNeq16_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32 x y) // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -6849,12 +7111,14 @@ func rewriteValueMIPS64_OpNeq32_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32F x y) // result: (FPFlagFalse (CMPEQF x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) v0.AddArg(x) @@ -6864,13 +7128,15 @@ func rewriteValueMIPS64_OpNeq32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq64 x y) // result: (SGTU (XOR x y) (MOVVconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v0.AddArg(x) @@ -6883,12 +7149,14 @@ func rewriteValueMIPS64_OpNeq64_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64F x y) // result: 
(FPFlagFalse (CMPEQD x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64FPFlagFalse) v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) v0.AddArg(x) @@ -6898,13 +7166,15 @@ func rewriteValueMIPS64_OpNeq64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -6921,11 +7191,13 @@ func rewriteValueMIPS64_OpNeq8_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v.AddArg(x) v.AddArg(y) @@ -6933,13 +7205,15 @@ func rewriteValueMIPS64_OpNeqB_0(v *Value) bool { } } func rewriteValueMIPS64_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NeqPtr x y) // result: (SGTU (XOR x y) (MOVVconst [0])) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v0.AddArg(x) @@ -6952,11 +7226,13 @@ func rewriteValueMIPS64_OpNeqPtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64LoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -6964,10 +7240,11 @@ func rewriteValueMIPS64_OpNilCheck_0(v *Value) bool { } } func rewriteValueMIPS64_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64XORconst) v.AuxInt = 1 v.AddArg(x) @@ -6975,11 +7252,12 @@ func rewriteValueMIPS64_OpNot_0(v *Value) bool { } } func rewriteValueMIPS64_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr [off] ptr:(SP)) // result: (MOVVaddr [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if ptr.Op != OpSP { break } @@ -6992,7 +7270,7 @@ func rewriteValueMIPS64_OpOffPtr_0(v *Value) bool { // result: (ADDVconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpMIPS64ADDVconst) v.AuxInt = off v.AddArg(ptr) @@ -7000,11 +7278,13 @@ func rewriteValueMIPS64_OpOffPtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64OR) v.AddArg(x) v.AddArg(y) @@ -7012,11 +7292,13 @@ func rewriteValueMIPS64_OpOr16_0(v *Value) bool { } } func rewriteValueMIPS64_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64OR) v.AddArg(x) v.AddArg(y) @@ -7024,11 +7306,13 @@ func rewriteValueMIPS64_OpOr32_0(v *Value) bool { } } func rewriteValueMIPS64_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or64 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64OR) v.AddArg(x) v.AddArg(y) @@ -7036,11 +7320,13 @@ func 
rewriteValueMIPS64_OpOr64_0(v *Value) bool { } } func rewriteValueMIPS64_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64OR) v.AddArg(x) v.AddArg(y) @@ -7048,11 +7334,13 @@ func rewriteValueMIPS64_OpOr8_0(v *Value) bool { } } func rewriteValueMIPS64_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64OR) v.AddArg(x) v.AddArg(y) @@ -7060,14 +7348,17 @@ func rewriteValueMIPS64_OpOrB_0(v *Value) bool { } } func rewriteValueMIPS64_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -7083,9 +7374,9 @@ func rewriteValueMIPS64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -7101,9 +7392,9 @@ func rewriteValueMIPS64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -7117,15 +7408,15 @@ func rewriteValueMIPS64_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueMIPS64_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (MOVVconst [c])) // result: (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -7148,15 +7439,15 @@ func rewriteValueMIPS64_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueMIPS64_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft32 x (MOVVconst [c])) // result: (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -7179,15 +7470,15 @@ func rewriteValueMIPS64_OpRotateLeft32_0(v *Value) bool { return false } func rewriteValueMIPS64_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft64 x (MOVVconst [c])) // result: (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -7210,15 +7501,15 @@ func rewriteValueMIPS64_OpRotateLeft64_0(v *Value) bool { return false } func rewriteValueMIPS64_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVVconst [c])) // result: (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpMIPS64MOVVconst { break } @@ -7241,10 +7532,11 @@ func 
rewriteValueMIPS64_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueMIPS64_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -7252,10 +7544,11 @@ func rewriteValueMIPS64_OpRound32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -7263,14 +7556,16 @@ func rewriteValueMIPS64_OpRound64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7294,14 +7589,16 @@ func rewriteValueMIPS64_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7325,14 +7622,16 @@ func rewriteValueMIPS64_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7352,14 +7651,16 @@ func rewriteValueMIPS64_OpRsh16Ux64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7383,14 +7684,16 @@ func rewriteValueMIPS64_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -7414,14 +7717,16 @@ func rewriteValueMIPS64_OpRsh16x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := 
v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -7445,14 +7750,16 @@ func rewriteValueMIPS64_OpRsh16x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -7472,14 +7779,16 @@ func rewriteValueMIPS64_OpRsh16x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -7503,14 +7812,16 @@ func rewriteValueMIPS64_OpRsh16x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7534,14 +7845,16 @@ func rewriteValueMIPS64_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7565,14 +7878,16 @@ func rewriteValueMIPS64_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7592,14 +7907,16 @@ func rewriteValueMIPS64_OpRsh32Ux64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7623,14 +7940,16 @@ func rewriteValueMIPS64_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { 
t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -7654,14 +7973,16 @@ func rewriteValueMIPS64_OpRsh32x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -7685,14 +8006,16 @@ func rewriteValueMIPS64_OpRsh32x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x y) // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -7712,14 +8035,16 @@ func rewriteValueMIPS64_OpRsh32x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -7743,14 +8068,16 @@ func rewriteValueMIPS64_OpRsh32x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7772,14 +8099,16 @@ func rewriteValueMIPS64_OpRsh64Ux16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7801,14 +8130,16 @@ func rewriteValueMIPS64_OpRsh64Ux32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7826,14 +8157,16 @@ func rewriteValueMIPS64_OpRsh64Ux64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x 
:= v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7855,14 +8188,16 @@ func rewriteValueMIPS64_OpRsh64Ux8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) @@ -7884,14 +8219,16 @@ func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 x y) // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) @@ -7913,14 +8250,16 @@ func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x64 x y) // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) @@ -7938,14 +8277,16 @@ func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) @@ -7967,14 +8308,16 @@ func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -7998,14 +8341,16 @@ func rewriteValueMIPS64_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -8029,14 +8374,16 @@ func rewriteValueMIPS64_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 
:= b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -8056,14 +8403,16 @@ func rewriteValueMIPS64_OpRsh8Ux64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) @@ -8087,14 +8436,16 @@ func rewriteValueMIPS64_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -8118,14 +8469,16 @@ func rewriteValueMIPS64_OpRsh8x16_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -8149,14 +8502,16 @@ func rewriteValueMIPS64_OpRsh8x32_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -8176,14 +8531,16 @@ func rewriteValueMIPS64_OpRsh8x64_0(v *Value) bool { } } func rewriteValueMIPS64_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SRAV) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -8207,10 +8564,10 @@ func rewriteValueMIPS64_OpRsh8x8_0(v *Value) bool { } } func rewriteValueMIPS64_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] // match: (Select0 (DIVVU _ (MOVVconst [1]))) // result: (MOVVconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVVU { break } @@ -8227,7 +8584,6 @@ func rewriteValueMIPS64_OpSelect0_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (ANDconst [c-1] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVVU { break } @@ -8249,7 +8605,6 @@ func rewriteValueMIPS64_OpSelect0_0(v *Value) bool { // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [c%d]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVV { break } @@ -8271,7 +8626,6 @@ func rewriteValueMIPS64_OpSelect0_0(v *Value) bool { // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [int64(uint64(c)%uint64(d))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVVU { break } @@ -8293,17 +8647,18 @@ func 
rewriteValueMIPS64_OpSelect0_0(v *Value) bool { return false } func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] // match: (Select1 (MULVU x (MOVVconst [-1]))) // result: (NEGV x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MULVU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 { continue } @@ -8316,13 +8671,13 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // match: (Select1 (MULVU _ (MOVVconst [0]))) // result: (MOVVconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MULVU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { continue } @@ -8335,14 +8690,14 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // match: (Select1 (MULVU x (MOVVconst [1]))) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MULVU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { continue } @@ -8357,14 +8712,14 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SLLVconst [log2(c)] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MULVU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpMIPS64MOVVconst { continue } @@ -8382,7 +8737,6 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // match: (Select1 (DIVVU x (MOVVconst [1]))) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVVU { break } @@ -8401,7 +8755,6 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (SRLVconst [log2(c)] x) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVVU { break } @@ -8423,18 +8776,17 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [c*d]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64MULVU { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpMIPS64MOVVconst { continue } c := v_0_0.AuxInt - v_0_1 := v_0.Args[1^_i0] if v_0_1.Op != OpMIPS64MOVVconst { continue } @@ -8448,7 +8800,6 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [c/d]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVV { break } @@ -8470,7 +8821,6 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) // result: (MOVVconst [int64(uint64(c)/uint64(d))]) for { - v_0 := v.Args[0] if v_0.Op != OpMIPS64DIVVU { break } @@ -8492,72 +8842,79 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool { return false } func rewriteValueMIPS64_OpSignExt16to32_0(v *Value) 
bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVHreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to64 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVHreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt32to64 x) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVWreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVBreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVBreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to64 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVBreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SRAVconst (NEGV x) [63]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpMIPS64SRAVconst) v.AuxInt = 63 v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) @@ -8567,22 +8924,24 @@ func rewriteValueMIPS64_OpSlicemask_0(v *Value) bool { } } func rewriteValueMIPS64_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (SQRTD x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64SQRTD) v.AddArg(x) return true } } func rewriteValueMIPS64_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpMIPS64CALLstatic) v.AuxInt = argwid v.Aux = target @@ -8591,14 +8950,17 @@ func rewriteValueMIPS64_OpStaticCall_0(v *Value) bool { } } func rewriteValueMIPS64_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 1 // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -8613,9 +8975,9 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -8630,9 +8992,9 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { break } @@ -8647,9 +9009,9 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool { // result: (MOVVstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { break } @@ -8664,9 +9026,9 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool { // result: (MOVFstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := 
v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -8681,9 +9043,9 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -8696,11 +9058,13 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool { return false } func rewriteValueMIPS64_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUBV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBV) v.AddArg(x) v.AddArg(y) @@ -8708,11 +9072,13 @@ func rewriteValueMIPS64_OpSub16_0(v *Value) bool { } } func rewriteValueMIPS64_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUBV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBV) v.AddArg(x) v.AddArg(y) @@ -8720,11 +9086,13 @@ func rewriteValueMIPS64_OpSub32_0(v *Value) bool { } } func rewriteValueMIPS64_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (SUBF x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBF) v.AddArg(x) v.AddArg(y) @@ -8732,11 +9100,13 @@ func rewriteValueMIPS64_OpSub32F_0(v *Value) bool { } } func rewriteValueMIPS64_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64 x y) // result: (SUBV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBV) v.AddArg(x) v.AddArg(y) @@ -8744,11 +9114,13 @@ func rewriteValueMIPS64_OpSub64_0(v *Value) bool { } } func rewriteValueMIPS64_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (SUBD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBD) v.AddArg(x) v.AddArg(y) @@ -8756,11 +9128,13 @@ func rewriteValueMIPS64_OpSub64F_0(v *Value) bool { } } func rewriteValueMIPS64_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUBV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBV) v.AddArg(x) v.AddArg(y) @@ -8768,11 +9142,13 @@ func rewriteValueMIPS64_OpSub8_0(v *Value) bool { } } func rewriteValueMIPS64_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUBV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64SUBV) v.AddArg(x) v.AddArg(y) @@ -8780,10 +9156,11 @@ func rewriteValueMIPS64_OpSubPtr_0(v *Value) bool { } } func rewriteValueMIPS64_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8791,10 +9168,11 @@ func rewriteValueMIPS64_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueMIPS64_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8802,10 +9180,11 @@ func rewriteValueMIPS64_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueMIPS64_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8813,10 +9192,11 @@ func 
rewriteValueMIPS64_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueMIPS64_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8824,10 +9204,11 @@ func rewriteValueMIPS64_OpTrunc64to16_0(v *Value) bool { } } func rewriteValueMIPS64_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8835,10 +9216,11 @@ func rewriteValueMIPS64_OpTrunc64to32_0(v *Value) bool { } } func rewriteValueMIPS64_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -8846,13 +9228,16 @@ func rewriteValueMIPS64_OpTrunc64to8_0(v *Value) bool { } } func rewriteValueMIPS64_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpMIPS64LoweredWB) v.Aux = fn v.AddArg(destptr) @@ -8862,11 +9247,13 @@ func rewriteValueMIPS64_OpWB_0(v *Value) bool { } } func rewriteValueMIPS64_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v.AddArg(x) v.AddArg(y) @@ -8874,11 +9261,13 @@ func rewriteValueMIPS64_OpXor16_0(v *Value) bool { } } func rewriteValueMIPS64_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v.AddArg(x) v.AddArg(y) @@ -8886,11 +9275,13 @@ func rewriteValueMIPS64_OpXor32_0(v *Value) bool { } } func rewriteValueMIPS64_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor64 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v.AddArg(x) v.AddArg(y) @@ -8898,11 +9289,13 @@ func rewriteValueMIPS64_OpXor64_0(v *Value) bool { } } func rewriteValueMIPS64_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMIPS64XOR) v.AddArg(x) v.AddArg(y) @@ -8910,6 +9303,8 @@ func rewriteValueMIPS64_OpXor8_0(v *Value) bool { } } func rewriteValueMIPS64_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [0] _ mem) @@ -8918,7 +9313,7 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -8930,8 +9325,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64MOVBstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) @@ -8948,8 +9343,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -8967,8 +9362,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 
+ mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 1 v.AddArg(ptr) @@ -8993,8 +9388,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -9014,8 +9409,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -9041,8 +9436,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 3 v.AddArg(ptr) @@ -9081,8 +9476,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%8 == 0) { break } @@ -9102,8 +9497,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -9131,8 +9526,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -9169,6 +9564,8 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool { return false } func rewriteValueMIPS64_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -9178,8 +9575,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpMIPS64MOVBstore) v.AuxInt = 2 v.AddArg(ptr) @@ -9211,8 +9608,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%2 == 0) { break } @@ -9247,8 +9644,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -9283,8 +9680,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%8 == 0) { break } @@ -9312,8 +9709,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%8 == 0) { break } @@ -9346,8 +9743,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice) { break } @@ -9363,8 +9760,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !((s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0) { break } @@ -9381,60 +9778,66 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool { return false } func rewriteValueMIPS64_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVHUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVHUreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to64 x) // 
result: (MOVHUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVHUreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt32to64 x) // result: (MOVWUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVWUreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVBUreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVBUreg) v.AddArg(x) return true } } func rewriteValueMIPS64_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to64 x) // result: (MOVBUreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpMIPS64MOVBUreg) v.AddArg(x) return true diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index bb6eb60efe..9ac9cf3aef 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -725,21 +725,24 @@ func rewriteValuePPC64(v *Value) bool { return false } func rewriteValuePPC64_OpAbs_0(v *Value) bool { + v_0 := v.Args[0] // match: (Abs x) // result: (FABS x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FABS) v.AddArg(x) return true } } func rewriteValuePPC64_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add16 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ADD) v.AddArg(x) v.AddArg(y) @@ -747,11 +750,13 @@ func rewriteValuePPC64_OpAdd16_0(v *Value) bool { } } func rewriteValuePPC64_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ADD) v.AddArg(x) v.AddArg(y) @@ -759,11 +764,13 @@ func rewriteValuePPC64_OpAdd32_0(v *Value) bool { } } func rewriteValuePPC64_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F x y) // result: (FADDS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FADDS) v.AddArg(x) v.AddArg(y) @@ -771,11 +778,13 @@ func rewriteValuePPC64_OpAdd32F_0(v *Value) bool { } } func rewriteValuePPC64_OpAdd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ADD) v.AddArg(x) v.AddArg(y) @@ -783,11 +792,13 @@ func rewriteValuePPC64_OpAdd64_0(v *Value) bool { } } func rewriteValuePPC64_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F x y) // result: (FADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FADD) v.AddArg(x) v.AddArg(y) @@ -795,12 +806,15 @@ func rewriteValuePPC64_OpAdd64F_0(v *Value) bool { } } func rewriteValuePPC64_OpAdd64carry_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64carry x y c) // result: (LoweredAdd64Carry x y c) for { - c := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + c := v_2 v.reset(OpPPC64LoweredAdd64Carry) v.AddArg(x) v.AddArg(y) @@ -809,11 +823,13 @@ func rewriteValuePPC64_OpAdd64carry_0(v *Value) bool { } } func rewriteValuePPC64_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // 
match: (Add8 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ADD) v.AddArg(x) v.AddArg(y) @@ -821,11 +837,13 @@ func rewriteValuePPC64_OpAdd8_0(v *Value) bool { } } func rewriteValuePPC64_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ADD) v.AddArg(x) v.AddArg(y) @@ -833,11 +851,12 @@ func rewriteValuePPC64_OpAddPtr_0(v *Value) bool { } } func rewriteValuePPC64_OpAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (Addr {sym} base) // result: (MOVDaddr {sym} base) for { sym := v.Aux - base := v.Args[0] + base := v_0 v.reset(OpPPC64MOVDaddr) v.Aux = sym v.AddArg(base) @@ -845,11 +864,13 @@ func rewriteValuePPC64_OpAddr_0(v *Value) bool { } } func rewriteValuePPC64_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And16 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64AND) v.AddArg(x) v.AddArg(y) @@ -857,11 +878,13 @@ func rewriteValuePPC64_OpAnd16_0(v *Value) bool { } } func rewriteValuePPC64_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And32 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64AND) v.AddArg(x) v.AddArg(y) @@ -869,11 +892,13 @@ func rewriteValuePPC64_OpAnd32_0(v *Value) bool { } } func rewriteValuePPC64_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And64 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64AND) v.AddArg(x) v.AddArg(y) @@ -881,11 +906,13 @@ func rewriteValuePPC64_OpAnd64_0(v *Value) bool { } } func rewriteValuePPC64_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64AND) v.AddArg(x) v.AddArg(y) @@ -893,11 +920,13 @@ func rewriteValuePPC64_OpAnd8_0(v *Value) bool { } } func rewriteValuePPC64_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64AND) v.AddArg(x) v.AddArg(y) @@ -905,12 +934,15 @@ func rewriteValuePPC64_OpAndB_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicAdd32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicAdd32 ptr val mem) // result: (LoweredAtomicAdd32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicAdd32) v.AddArg(ptr) v.AddArg(val) @@ -919,12 +951,15 @@ func rewriteValuePPC64_OpAtomicAdd32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicAdd64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicAdd64 ptr val mem) // result: (LoweredAtomicAdd64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicAdd64) v.AddArg(ptr) v.AddArg(val) @@ -933,12 +968,15 @@ func rewriteValuePPC64_OpAtomicAdd64_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicAnd8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicAnd8 ptr val mem) // result: (LoweredAtomicAnd8 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + 
val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicAnd8) v.AddArg(ptr) v.AddArg(val) @@ -947,13 +985,17 @@ func rewriteValuePPC64_OpAtomicAnd8_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap32 ptr old new_ mem) // result: (LoweredAtomicCas32 [1] ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpPPC64LoweredAtomicCas32) v.AuxInt = 1 v.AddArg(ptr) @@ -964,13 +1006,17 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap64 ptr old new_ mem) // result: (LoweredAtomicCas64 [1] ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpPPC64LoweredAtomicCas64) v.AuxInt = 1 v.AddArg(ptr) @@ -981,13 +1027,17 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicCompareAndSwapRel32_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwapRel32 ptr old new_ mem) // result: (LoweredAtomicCas32 [0] ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpPPC64LoweredAtomicCas32) v.AuxInt = 0 v.AddArg(ptr) @@ -998,12 +1048,15 @@ func rewriteValuePPC64_OpAtomicCompareAndSwapRel32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicExchange32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange32 ptr val mem) // result: (LoweredAtomicExchange32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicExchange32) v.AddArg(ptr) v.AddArg(val) @@ -1012,12 +1065,15 @@ func rewriteValuePPC64_OpAtomicExchange32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicExchange64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange64 ptr val mem) // result: (LoweredAtomicExchange64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicExchange64) v.AddArg(ptr) v.AddArg(val) @@ -1026,11 +1082,13 @@ func rewriteValuePPC64_OpAtomicExchange64_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoad32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad32 ptr mem) // result: (LoweredAtomicLoad32 [1] ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredAtomicLoad32) v.AuxInt = 1 v.AddArg(ptr) @@ -1039,11 +1097,13 @@ func rewriteValuePPC64_OpAtomicLoad32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoad64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad64 ptr mem) // result: (LoweredAtomicLoad64 [1] ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredAtomicLoad64) v.AuxInt = 1 v.AddArg(ptr) @@ -1052,11 +1112,13 @@ func rewriteValuePPC64_OpAtomicLoad64_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoad8_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad8 ptr mem) // result: (LoweredAtomicLoad8 [1] ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredAtomicLoad8) v.AuxInt = 1 v.AddArg(ptr) @@ -1065,11 +1127,13 @@ func rewriteValuePPC64_OpAtomicLoad8_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoadAcq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadAcq32 ptr mem) // result: (LoweredAtomicLoad32 [0] ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredAtomicLoad32) v.AuxInt = 0 v.AddArg(ptr) @@ -1078,11 +1142,13 @@ func rewriteValuePPC64_OpAtomicLoadAcq32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoadPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadPtr ptr mem) // result: (LoweredAtomicLoadPtr [1] ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredAtomicLoadPtr) v.AuxInt = 1 v.AddArg(ptr) @@ -1091,12 +1157,15 @@ func rewriteValuePPC64_OpAtomicLoadPtr_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicOr8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicOr8 ptr val mem) // result: (LoweredAtomicOr8 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicOr8) v.AddArg(ptr) v.AddArg(val) @@ -1105,12 +1174,15 @@ func rewriteValuePPC64_OpAtomicOr8_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore32 ptr val mem) // result: (LoweredAtomicStore32 [1] ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicStore32) v.AuxInt = 1 v.AddArg(ptr) @@ -1120,12 +1192,15 @@ func rewriteValuePPC64_OpAtomicStore32_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicStore64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore64 ptr val mem) // result: (LoweredAtomicStore64 [1] ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicStore64) v.AuxInt = 1 v.AddArg(ptr) @@ -1135,12 +1210,15 @@ func rewriteValuePPC64_OpAtomicStore64_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicStore8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStore8 ptr val mem) // result: (LoweredAtomicStore8 [1] ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicStore8) v.AuxInt = 1 v.AddArg(ptr) @@ -1150,12 +1228,15 @@ func rewriteValuePPC64_OpAtomicStore8_0(v *Value) bool { } } func rewriteValuePPC64_OpAtomicStoreRel32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStoreRel32 ptr val mem) // result: (LoweredAtomicStore32 [0] ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpPPC64LoweredAtomicStore32) v.AuxInt = 0 v.AddArg(ptr) @@ -1165,13 +1246,15 @@ func rewriteValuePPC64_OpAtomicStoreRel32_0(v *Value) bool { } } func rewriteValuePPC64_OpAvg64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Avg64u x y) // result: (ADD (SRDconst (SUB x 
y) [1]) y) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ADD) v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t) v0.AuxInt = 1 @@ -1185,12 +1268,13 @@ func rewriteValuePPC64_OpAvg64u_0(v *Value) bool { } } func rewriteValuePPC64_OpBitLen32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen32 x) // result: (SUB (MOVDconst [32]) (CNTLZW x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64SUB) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 32 @@ -1202,12 +1286,13 @@ func rewriteValuePPC64_OpBitLen32_0(v *Value) bool { } } func rewriteValuePPC64_OpBitLen64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) // result: (SUB (MOVDconst [64]) (CNTLZD x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64SUB) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = 64 @@ -1219,23 +1304,27 @@ func rewriteValuePPC64_OpBitLen64_0(v *Value) bool { } } func rewriteValuePPC64_OpCeil_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ceil x) // result: (FCEIL x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FCEIL) v.AddArg(x) return true } } func rewriteValuePPC64_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpPPC64CALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -1245,10 +1334,11 @@ func rewriteValuePPC64_OpClosureCall_0(v *Value) bool { } } func rewriteValuePPC64_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (NOR x x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NOR) v.AddArg(x) v.AddArg(x) @@ -1256,10 +1346,11 @@ func rewriteValuePPC64_OpCom16_0(v *Value) bool { } } func rewriteValuePPC64_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (NOR x x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NOR) v.AddArg(x) v.AddArg(x) @@ -1267,10 +1358,11 @@ func rewriteValuePPC64_OpCom32_0(v *Value) bool { } } func rewriteValuePPC64_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com64 x) // result: (NOR x x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NOR) v.AddArg(x) v.AddArg(x) @@ -1278,10 +1370,11 @@ func rewriteValuePPC64_OpCom64_0(v *Value) bool { } } func rewriteValuePPC64_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (NOR x x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NOR) v.AddArg(x) v.AddArg(x) @@ -1289,14 +1382,17 @@ func rewriteValuePPC64_OpCom8_0(v *Value) bool { } } func rewriteValuePPC64_OpCondSelect_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CondSelect x y bool) // cond: flagArg(bool) != nil // result: (ISEL [2] x y bool) for { - bool := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + bool := v_2 if !(flagArg(bool) != nil) { break } @@ -1311,9 +1407,9 @@ func rewriteValuePPC64_OpCondSelect_0(v *Value) bool { // cond: flagArg(bool) == nil // result: (ISEL [2] x y (CMPWconst [0] bool)) for { - bool := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + bool := v_2 if !(flagArg(bool) == nil) { break } @@ -1409,11 +1505,13 @@ func rewriteValuePPC64_OpConstNil_0(v *Value) bool { } } func rewriteValuePPC64_OpCopysign_0(v *Value) bool { + v_1 := v.Args[1] + 
v_0 := v.Args[0] // match: (Copysign x y) // result: (FCPSGN y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FCPSGN) v.AddArg(y) v.AddArg(x) @@ -1421,12 +1519,13 @@ func rewriteValuePPC64_OpCopysign_0(v *Value) bool { } } func rewriteValuePPC64_OpCtz16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) // result: (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x))) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64POPCNTW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16) @@ -1441,13 +1540,14 @@ func rewriteValuePPC64_OpCtz16_0(v *Value) bool { } } func rewriteValuePPC64_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz32 x) // cond: objabi.GOPPC64<=8 // result: (POPCNTW (MOVWZreg (ANDN (ADDconst [-1] x) x))) for { - x := v.Args[0] + x := v_0 if !(objabi.GOPPC64 <= 8) { break } @@ -1466,7 +1566,7 @@ func rewriteValuePPC64_OpCtz32_0(v *Value) bool { // match: (Ctz32 x) // result: (CNTTZW (MOVWZreg x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64CNTTZW) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) v0.AddArg(x) @@ -1475,23 +1575,25 @@ func rewriteValuePPC64_OpCtz32_0(v *Value) bool { } } func rewriteValuePPC64_OpCtz32NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz32NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValuePPC64_OpCtz64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz64 x) // cond: objabi.GOPPC64<=8 // result: (POPCNTD (ANDN (ADDconst [-1] x) x)) for { - x := v.Args[0] + x := v_0 if !(objabi.GOPPC64 <= 8) { break } @@ -1508,29 +1610,31 @@ func rewriteValuePPC64_OpCtz64_0(v *Value) bool { // match: (Ctz64 x) // result: (CNTTZD x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64CNTTZD) v.AddArg(x) return true } } func rewriteValuePPC64_OpCtz64NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz64NonZero x) // result: (Ctz64 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz64) v.AddArg(x) return true } } func rewriteValuePPC64_OpCtz8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) // result: (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x))) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64POPCNTB) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8) @@ -1545,12 +1649,13 @@ func rewriteValuePPC64_OpCtz8_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32Fto32 x) // result: (MFVSRD (FCTIWZ x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64) v0.AddArg(x) @@ -1559,12 +1664,13 @@ func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt32Fto64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32Fto64 x) // result: (MFVSRD (FCTIDZ x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64) v0.AddArg(x) @@ -1573,10 +1679,11 @@ func rewriteValuePPC64_OpCvt32Fto64_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ 
-1584,12 +1691,13 @@ func rewriteValuePPC64_OpCvt32Fto64F_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32to32F x) // result: (FCFIDS (MTVSRD (SignExt32to64 x))) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FCFIDS) v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -1600,12 +1708,13 @@ func rewriteValuePPC64_OpCvt32to32F_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32to64F x) // result: (FCFID (MTVSRD (SignExt32to64 x))) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FCFID) v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -1616,12 +1725,13 @@ func rewriteValuePPC64_OpCvt32to64F_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt64Fto32 x) // result: (MFVSRD (FCTIWZ x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64) v0.AddArg(x) @@ -1630,22 +1740,24 @@ func rewriteValuePPC64_OpCvt64Fto32_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (FRSP x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FRSP) v.AddArg(x) return true } } func rewriteValuePPC64_OpCvt64Fto64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt64Fto64 x) // result: (MFVSRD (FCTIDZ x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64) v0.AddArg(x) @@ -1654,12 +1766,13 @@ func rewriteValuePPC64_OpCvt64Fto64_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt64to32F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt64to32F x) // result: (FCFIDS (MTVSRD x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FCFIDS) v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) v0.AddArg(x) @@ -1668,12 +1781,13 @@ func rewriteValuePPC64_OpCvt64to32F_0(v *Value) bool { } } func rewriteValuePPC64_OpCvt64to64F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt64to64F x) // result: (FCFID (MTVSRD x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FCFID) v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) v0.AddArg(x) @@ -1682,13 +1796,15 @@ func rewriteValuePPC64_OpCvt64to64F_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -1700,13 +1816,15 @@ func rewriteValuePPC64_OpDiv16_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -1718,11 +1836,13 @@ func rewriteValuePPC64_OpDiv16u_0(v *Value) bool { } } func 
rewriteValuePPC64_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32 x y) // result: (DIVW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVW) v.AddArg(x) v.AddArg(y) @@ -1730,11 +1850,13 @@ func rewriteValuePPC64_OpDiv32_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (FDIVS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FDIVS) v.AddArg(x) v.AddArg(y) @@ -1742,11 +1864,13 @@ func rewriteValuePPC64_OpDiv32F_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32u x y) // result: (DIVWU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVWU) v.AddArg(x) v.AddArg(y) @@ -1754,11 +1878,13 @@ func rewriteValuePPC64_OpDiv32u_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64 x y) // result: (DIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVD) v.AddArg(x) v.AddArg(y) @@ -1766,11 +1892,13 @@ func rewriteValuePPC64_OpDiv64_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (FDIV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FDIV) v.AddArg(x) v.AddArg(y) @@ -1778,11 +1906,13 @@ func rewriteValuePPC64_OpDiv64F_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64u x y) // result: (DIVDU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVDU) v.AddArg(x) v.AddArg(y) @@ -1790,13 +1920,15 @@ func rewriteValuePPC64_OpDiv64u_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -1808,13 +1940,15 @@ func rewriteValuePPC64_OpDiv8_0(v *Value) bool { } } func rewriteValuePPC64_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64DIVWU) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -1826,16 +1960,17 @@ func rewriteValuePPC64_OpDiv8u_0(v *Value) bool { } } func rewriteValuePPC64_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // cond: isSigned(x.Type) && isSigned(y.Type) // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - y := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 if !(isSigned(x.Type) && isSigned(y.Type)) { continue } @@ -1855,8 +1990,8 @@ func rewriteValuePPC64_OpEq16_0(v *Value) bool { // match: (Eq16 x y) // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, 
types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -1870,12 +2005,14 @@ func rewriteValuePPC64_OpEq16_0(v *Value) bool { } } func rewriteValuePPC64_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32 x y) // result: (Equal (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v0.AddArg(x) @@ -1885,12 +2022,14 @@ func rewriteValuePPC64_OpEq32_0(v *Value) bool { } } func rewriteValuePPC64_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq32F x y) // result: (Equal (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -1900,12 +2039,14 @@ func rewriteValuePPC64_OpEq32F_0(v *Value) bool { } } func rewriteValuePPC64_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64 x y) // result: (Equal (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -1915,12 +2056,14 @@ func rewriteValuePPC64_OpEq64_0(v *Value) bool { } } func rewriteValuePPC64_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64F x y) // result: (Equal (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -1930,16 +2073,17 @@ func rewriteValuePPC64_OpEq64F_0(v *Value) bool { } } func rewriteValuePPC64_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // cond: isSigned(x.Type) && isSigned(y.Type) // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - y := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 if !(isSigned(x.Type) && isSigned(y.Type)) { continue } @@ -1959,8 +2103,8 @@ func rewriteValuePPC64_OpEq8_0(v *Value) bool { // match: (Eq8 x y) // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -1974,13 +2118,15 @@ func rewriteValuePPC64_OpEq8_0(v *Value) bool { } } func rewriteValuePPC64_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (ANDconst [1] (EQV x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ANDconst) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) @@ -1991,12 +2137,14 @@ func rewriteValuePPC64_OpEqB_0(v *Value) bool { } } func rewriteValuePPC64_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqPtr x y) // result: (Equal (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64Equal) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -2006,12 +2154,15 @@ func rewriteValuePPC64_OpEqPtr_0(v *Value) bool { } } func rewriteValuePPC64_OpFMA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMA x y z) // result: (FMADD x y z) for { - z := v.Args[2] - x := 
v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + z := v_2 v.reset(OpPPC64FMADD) v.AddArg(x) v.AddArg(y) @@ -2020,23 +2171,26 @@ func rewriteValuePPC64_OpFMA_0(v *Value) bool { } } func rewriteValuePPC64_OpFloor_0(v *Value) bool { + v_0 := v.Args[0] // match: (Floor x) // result: (FFLOOR x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FFLOOR) v.AddArg(x) return true } } func rewriteValuePPC64_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -2050,13 +2204,15 @@ func rewriteValuePPC64_OpGeq16_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -2070,12 +2226,14 @@ func rewriteValuePPC64_OpGeq16U_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32 x y) // result: (GreaterEqual (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v0.AddArg(x) @@ -2085,12 +2243,14 @@ func rewriteValuePPC64_OpGeq32_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32F x y) // result: (FGreaterEqual (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FGreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2100,12 +2260,14 @@ func rewriteValuePPC64_OpGeq32F_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq32U x y) // result: (GreaterEqual (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v0.AddArg(x) @@ -2115,12 +2277,14 @@ func rewriteValuePPC64_OpGeq32U_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64 x y) // result: (GreaterEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -2130,12 +2294,14 @@ func rewriteValuePPC64_OpGeq64_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64F x y) // result: (FGreaterEqual (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FGreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2145,12 +2311,14 @@ func rewriteValuePPC64_OpGeq64F_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Geq64U x y) // result: (GreaterEqual (CMPU x y)) for { - y := 
v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg(x) @@ -2160,13 +2328,15 @@ func rewriteValuePPC64_OpGeq64U_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -2180,13 +2350,15 @@ func rewriteValuePPC64_OpGeq8_0(v *Value) bool { } } func rewriteValuePPC64_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -2224,13 +2396,15 @@ func rewriteValuePPC64_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -2244,13 +2418,15 @@ func rewriteValuePPC64_OpGreater16_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -2264,12 +2440,14 @@ func rewriteValuePPC64_OpGreater16U_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32 x y) // result: (GreaterThan (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v0.AddArg(x) @@ -2279,12 +2457,14 @@ func rewriteValuePPC64_OpGreater32_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32F x y) // result: (FGreaterThan (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FGreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2294,12 +2474,14 @@ func rewriteValuePPC64_OpGreater32F_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater32U x y) // result: (GreaterThan (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v0.AddArg(x) @@ -2309,12 +2491,14 @@ func rewriteValuePPC64_OpGreater32U_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] b := v.Block // match: (Greater64 x y) // result: (GreaterThan (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -2324,12 +2508,14 @@ func rewriteValuePPC64_OpGreater64_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64F x y) // result: (FGreaterThan (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FGreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2339,12 +2525,14 @@ func rewriteValuePPC64_OpGreater64F_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Greater64U x y) // result: (GreaterThan (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg(x) @@ -2354,13 +2542,15 @@ func rewriteValuePPC64_OpGreater64U_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -2374,13 +2564,15 @@ func rewriteValuePPC64_OpGreater8_0(v *Value) bool { } } func rewriteValuePPC64_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64GreaterThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -2394,11 +2586,13 @@ func rewriteValuePPC64_OpGreater8U_0(v *Value) bool { } } func rewriteValuePPC64_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul32 x y) // result: (MULHW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULHW) v.AddArg(x) v.AddArg(y) @@ -2406,11 +2600,13 @@ func rewriteValuePPC64_OpHmul32_0(v *Value) bool { } } func rewriteValuePPC64_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul32u x y) // result: (MULHWU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULHWU) v.AddArg(x) v.AddArg(y) @@ -2418,11 +2614,13 @@ func rewriteValuePPC64_OpHmul32u_0(v *Value) bool { } } func rewriteValuePPC64_OpHmul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64 x y) // result: (MULHD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULHD) v.AddArg(x) v.AddArg(y) @@ -2430,11 +2628,13 @@ func rewriteValuePPC64_OpHmul64_0(v *Value) bool { } } func rewriteValuePPC64_OpHmul64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64u x y) // result: (MULHDU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULHDU) v.AddArg(x) v.AddArg(y) @@ -2442,12 +2642,14 @@ func rewriteValuePPC64_OpHmul64u_0(v *Value) bool { } } func rewriteValuePPC64_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // 
match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpPPC64CALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -2456,12 +2658,14 @@ func rewriteValuePPC64_OpInterCall_0(v *Value) bool { } } func rewriteValuePPC64_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsInBounds idx len) // result: (LessThan (CMPU idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg(idx) @@ -2471,11 +2675,12 @@ func rewriteValuePPC64_OpIsInBounds_0(v *Value) bool { } } func rewriteValuePPC64_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (IsNonNil ptr) // result: (NotEqual (CMPconst [0] ptr)) for { - ptr := v.Args[0] + ptr := v_0 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) v0.AuxInt = 0 @@ -2485,12 +2690,14 @@ func rewriteValuePPC64_OpIsNonNil_0(v *Value) bool { } } func rewriteValuePPC64_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (IsSliceInBounds idx len) // result: (LessEqual (CMPU idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg(idx) @@ -2500,13 +2707,15 @@ func rewriteValuePPC64_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -2520,13 +2729,15 @@ func rewriteValuePPC64_OpLeq16_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -2540,12 +2751,14 @@ func rewriteValuePPC64_OpLeq16U_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32 x y) // result: (LessEqual (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v0.AddArg(x) @@ -2555,12 +2768,14 @@ func rewriteValuePPC64_OpLeq32_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32F x y) // result: (FLessEqual (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FLessEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2570,12 +2785,14 @@ func rewriteValuePPC64_OpLeq32F_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq32U x y) // result: (LessEqual (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 
v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v0.AddArg(x) @@ -2585,12 +2802,14 @@ func rewriteValuePPC64_OpLeq32U_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64 x y) // result: (LessEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -2600,12 +2819,14 @@ func rewriteValuePPC64_OpLeq64_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64F x y) // result: (FLessEqual (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FLessEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2615,12 +2836,14 @@ func rewriteValuePPC64_OpLeq64F_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Leq64U x y) // result: (LessEqual (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg(x) @@ -2630,13 +2853,15 @@ func rewriteValuePPC64_OpLeq64U_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -2650,13 +2875,15 @@ func rewriteValuePPC64_OpLeq8_0(v *Value) bool { } } func rewriteValuePPC64_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -2670,13 +2897,15 @@ func rewriteValuePPC64_OpLeq8U_0(v *Value) bool { } } func rewriteValuePPC64_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) @@ -2690,13 +2919,15 @@ func rewriteValuePPC64_OpLess16_0(v *Value) bool { } } func rewriteValuePPC64_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -2710,12 +2941,14 @@ func rewriteValuePPC64_OpLess16U_0(v *Value) bool { } } func rewriteValuePPC64_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32 x y) // result: (LessThan (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 
v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v0.AddArg(x) @@ -2725,12 +2958,14 @@ func rewriteValuePPC64_OpLess32_0(v *Value) bool { } } func rewriteValuePPC64_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32F x y) // result: (FLessThan (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FLessThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2740,12 +2975,14 @@ func rewriteValuePPC64_OpLess32F_0(v *Value) bool { } } func rewriteValuePPC64_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less32U x y) // result: (LessThan (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v0.AddArg(x) @@ -2755,12 +2992,14 @@ func rewriteValuePPC64_OpLess32U_0(v *Value) bool { } } func rewriteValuePPC64_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64 x y) // result: (LessThan (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -2770,12 +3009,14 @@ func rewriteValuePPC64_OpLess64_0(v *Value) bool { } } func rewriteValuePPC64_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64F x y) // result: (FLessThan (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FLessThan) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -2785,12 +3026,14 @@ func rewriteValuePPC64_OpLess64F_0(v *Value) bool { } } func rewriteValuePPC64_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Less64U x y) // result: (LessThan (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg(x) @@ -2800,13 +3043,15 @@ func rewriteValuePPC64_OpLess64U_0(v *Value) bool { } } func rewriteValuePPC64_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) @@ -2820,13 +3065,15 @@ func rewriteValuePPC64_OpLess8_0(v *Value) bool { } } func rewriteValuePPC64_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64LessThan) v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -2840,6 +3087,8 @@ func rewriteValuePPC64_OpLess8U_0(v *Value) bool { } } func rewriteValuePPC64_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Load ptr mem) @@ -2847,8 +3096,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) || isPtr(t)) { break } @@ -2862,8 +3111,8 @@ func 
rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && isSigned(t)) { break } @@ -2877,8 +3126,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVWZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && !isSigned(t)) { break } @@ -2892,8 +3141,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -2907,8 +3156,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVHZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -2922,8 +3171,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVBZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean()) { break } @@ -2937,8 +3186,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVBreg (MOVBZload ptr mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -2954,8 +3203,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (MOVBZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && !isSigned(t)) { break } @@ -2969,8 +3218,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (FMOVSload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -2984,8 +3233,8 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { // result: (FMOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -2997,12 +3246,12 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool { return false } func rewriteValuePPC64_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVDaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpPPC64MOVDaddr) v.Aux = sym v.AddArg(base) @@ -3010,14 +3259,16 @@ func rewriteValuePPC64_OpLocalAddr_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3029,8 +3280,8 @@ func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool { // match: (Lsh16x16 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3048,15 +3299,15 @@ func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3073,9 +3324,7 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool { // cond: 
uint32(c) < 16 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3092,8 +3341,8 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3105,8 +3354,8 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool { // match: (Lsh16x32 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3124,15 +3373,15 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3149,8 +3398,6 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -3166,9 +3413,7 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { // cond: uint64(c) < 16 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3185,8 +3430,8 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3198,8 +3443,8 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { // match: (Lsh16x64 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3215,14 +3460,16 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3234,8 +3481,8 @@ func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool { // match: (Lsh16x8 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3253,14 +3500,16 @@ func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3272,8 +3521,8 @@ func rewriteValuePPC64_OpLsh32x16_0(v *Value) bool { // match: (Lsh32x16 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3291,15 +3540,15 @@ func rewriteValuePPC64_OpLsh32x16_0(v 
*Value) bool { } } func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3316,9 +3565,7 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool { // cond: uint32(c) < 32 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3335,8 +3582,8 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3348,8 +3595,8 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool { // match: (Lsh32x32 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3367,15 +3614,15 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3392,8 +3639,6 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -3409,9 +3654,7 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { // cond: uint64(c) < 32 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3428,8 +3671,8 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3441,16 +3684,15 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { // match: (Lsh32x64 x (AND y (MOVDconst [31]))) // result: (SLW x (ANDconst [31] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64AND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1.Args[_i0] - v_1_1 := v_1.Args[1^_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { continue } @@ -3467,9 +3709,7 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { // match: (Lsh32x64 x (ANDconst [31] y)) // result: (SLW x (ANDconst [31] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int32 || v_1.AuxInt != 31 { break } @@ -3485,8 +3725,8 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { // match: (Lsh32x64 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3502,14 +3742,16 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types // match: (Lsh32x8 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3521,8 +3763,8 @@ func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool { // match: (Lsh32x8 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3540,14 +3782,16 @@ func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3559,8 +3803,8 @@ func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool { // match: (Lsh64x16 x y) // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3578,15 +3822,15 @@ func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x (Const64 [c])) // cond: uint32(c) < 64 // result: (SLDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3603,9 +3847,7 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool { // cond: uint32(c) < 64 // result: (SLDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3622,8 +3864,8 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3635,8 +3877,8 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool { // match: (Lsh64x32 x y) // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3654,15 +3896,15 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x64 x (Const64 [c])) // cond: uint64(c) < 64 // result: (SLDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3679,8 +3921,6 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -3696,9 +3936,7 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { // cond: uint64(c) < 64 // result: (SLDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3715,8 +3953,8 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3728,16 +3966,15 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { // 
match: (Lsh64x64 x (AND y (MOVDconst [63]))) // result: (SLD x (ANDconst [63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64AND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1.Args[_i0] - v_1_1 := v_1.Args[1^_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { continue } @@ -3754,9 +3991,7 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { // match: (Lsh64x64 x (ANDconst [63] y)) // result: (SLD x (ANDconst [63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int64 || v_1.AuxInt != 63 { break } @@ -3772,8 +4007,8 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { // match: (Lsh64x64 x y) // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3789,14 +4024,16 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3808,8 +4045,8 @@ func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool { // match: (Lsh64x8 x y) // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3827,14 +4064,16 @@ func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3846,8 +4085,8 @@ func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool { // match: (Lsh8x16 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3865,15 +4104,15 @@ func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3890,9 +4129,7 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool { // cond: uint32(c) < 8 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -3909,8 +4146,8 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3922,8 +4159,8 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool { // match: (Lsh8x32 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y))))) for { - y 
:= v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -3941,15 +4178,15 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -3966,8 +4203,6 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -3983,9 +4218,7 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { // cond: uint64(c) < 8 // result: (SLWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -4002,8 +4235,8 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -4015,8 +4248,8 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { // match: (Lsh8x64 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -4032,14 +4265,16 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { } } func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -4051,8 +4286,8 @@ func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool { // match: (Lsh8x8 x y) // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -4070,13 +4305,15 @@ func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool { } } func rewriteValuePPC64_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -4088,13 +4325,15 @@ func rewriteValuePPC64_OpMod16_0(v *Value) bool { } } func rewriteValuePPC64_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -4106,13 +4345,15 @@ func rewriteValuePPC64_OpMod16u_0(v *Value) bool { } } func rewriteValuePPC64_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) // result: (SUB x (MULLW y (DIVW x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) @@ -4126,13 +4367,15 @@ func rewriteValuePPC64_OpMod32_0(v *Value) bool { } } func 
rewriteValuePPC64_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (SUB x (MULLW y (DIVWU x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) @@ -4146,13 +4389,15 @@ func rewriteValuePPC64_OpMod32u_0(v *Value) bool { } } func rewriteValuePPC64_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod64 x y) // result: (SUB x (MULLD y (DIVD x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) @@ -4166,13 +4411,15 @@ func rewriteValuePPC64_OpMod64_0(v *Value) bool { } } func rewriteValuePPC64_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod64u x y) // result: (SUB x (MULLD y (DIVDU x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) @@ -4186,13 +4433,15 @@ func rewriteValuePPC64_OpMod64u_0(v *Value) bool { } } func rewriteValuePPC64_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -4204,13 +4453,15 @@ func rewriteValuePPC64_OpMod8_0(v *Value) bool { } } func rewriteValuePPC64_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpMod32u) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -4222,6 +4473,9 @@ func rewriteValuePPC64_OpMod8u_0(v *Value) bool { } } func rewriteValuePPC64_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -4230,7 +4484,7 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -4242,9 +4496,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) @@ -4260,9 +4514,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVHstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) @@ -4278,9 +4532,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVWstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) @@ -4298,9 +4552,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { break } t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ 
-4319,9 +4573,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVWstore) v.AuxInt = 4 v.AddArg(dst) @@ -4346,9 +4600,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -4373,9 +4627,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 4 v.AddArg(dst) @@ -4400,9 +4654,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = 4 v.AddArg(dst) @@ -4427,9 +4681,9 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = 6 v.AddArg(dst) @@ -4460,14 +4714,17 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool { return false } func rewriteValuePPC64_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Move [s] dst src mem) // cond: s > 8 // result: (LoweredMove [s] dst src mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 8) { break } @@ -4481,11 +4738,13 @@ func rewriteValuePPC64_OpMove_10(v *Value) bool { return false } func rewriteValuePPC64_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (MULLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULLW) v.AddArg(x) v.AddArg(y) @@ -4493,11 +4752,13 @@ func rewriteValuePPC64_OpMul16_0(v *Value) bool { } } func rewriteValuePPC64_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MULLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULLW) v.AddArg(x) v.AddArg(y) @@ -4505,11 +4766,13 @@ func rewriteValuePPC64_OpMul32_0(v *Value) bool { } } func rewriteValuePPC64_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (FMULS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FMULS) v.AddArg(x) v.AddArg(y) @@ -4517,11 +4780,13 @@ func rewriteValuePPC64_OpMul32F_0(v *Value) bool { } } func rewriteValuePPC64_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64 x y) // result: (MULLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULLD) v.AddArg(x) v.AddArg(y) @@ -4529,11 +4794,13 @@ func rewriteValuePPC64_OpMul64_0(v *Value) bool { } } func rewriteValuePPC64_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (FMUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FMUL) v.AddArg(x) v.AddArg(y) @@ -4541,11 +4808,13 @@ func rewriteValuePPC64_OpMul64F_0(v *Value) bool { } } func rewriteValuePPC64_OpMul64uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64uhilo x y) // result: (LoweredMuluhilo x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := 
v_1 v.reset(OpPPC64LoweredMuluhilo) v.AddArg(x) v.AddArg(y) @@ -4553,11 +4822,13 @@ func rewriteValuePPC64_OpMul64uhilo_0(v *Value) bool { } } func rewriteValuePPC64_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (MULLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64MULLW) v.AddArg(x) v.AddArg(y) @@ -4565,76 +4836,83 @@ func rewriteValuePPC64_OpMul8_0(v *Value) bool { } } func rewriteValuePPC64_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NEG) v.AddArg(x) return true } } func rewriteValuePPC64_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NEG) v.AddArg(x) return true } } func rewriteValuePPC64_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (FNEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FNEG) v.AddArg(x) return true } } func rewriteValuePPC64_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NEG) v.AddArg(x) return true } } func rewriteValuePPC64_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (FNEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FNEG) v.AddArg(x) return true } } func rewriteValuePPC64_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64NEG) v.AddArg(x) return true } } func rewriteValuePPC64_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // cond: isSigned(x.Type) && isSigned(y.Type) // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - y := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 if !(isSigned(x.Type) && isSigned(y.Type)) { continue } @@ -4654,8 +4932,8 @@ func rewriteValuePPC64_OpNeq16_0(v *Value) bool { // match: (Neq16 x y) // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -4669,12 +4947,14 @@ func rewriteValuePPC64_OpNeq16_0(v *Value) bool { } } func rewriteValuePPC64_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32 x y) // result: (NotEqual (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v0.AddArg(x) @@ -4684,12 +4964,14 @@ func rewriteValuePPC64_OpNeq32_0(v *Value) bool { } } func rewriteValuePPC64_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq32F x y) // result: (NotEqual (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -4699,12 +4981,14 @@ func rewriteValuePPC64_OpNeq32F_0(v *Value) bool { } } func rewriteValuePPC64_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64 x y) // result: (NotEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 
v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -4714,12 +4998,14 @@ func rewriteValuePPC64_OpNeq64_0(v *Value) bool { } } func rewriteValuePPC64_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64F x y) // result: (NotEqual (FCMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) v0.AddArg(x) @@ -4729,16 +5015,17 @@ func rewriteValuePPC64_OpNeq64F_0(v *Value) bool { } } func rewriteValuePPC64_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // cond: isSigned(x.Type) && isSigned(y.Type) // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - y := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 if !(isSigned(x.Type) && isSigned(y.Type)) { continue } @@ -4758,8 +5045,8 @@ func rewriteValuePPC64_OpNeq8_0(v *Value) bool { // match: (Neq8 x y) // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) @@ -4773,11 +5060,13 @@ func rewriteValuePPC64_OpNeq8_0(v *Value) bool { } } func rewriteValuePPC64_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64XOR) v.AddArg(x) v.AddArg(y) @@ -4785,12 +5074,14 @@ func rewriteValuePPC64_OpNeqB_0(v *Value) bool { } } func rewriteValuePPC64_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqPtr x y) // result: (NotEqual (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64NotEqual) v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) v0.AddArg(x) @@ -4800,11 +5091,13 @@ func rewriteValuePPC64_OpNeqPtr_0(v *Value) bool { } } func rewriteValuePPC64_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -4812,10 +5105,11 @@ func rewriteValuePPC64_OpNilCheck_0(v *Value) bool { } } func rewriteValuePPC64_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64XORconst) v.AuxInt = 1 v.AddArg(x) @@ -4823,13 +5117,14 @@ func rewriteValuePPC64_OpNot_0(v *Value) bool { } } func rewriteValuePPC64_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OffPtr [off] ptr) // result: (ADD (MOVDconst [off]) ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpPPC64ADD) v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v0.AuxInt = off @@ -4839,11 +5134,13 @@ func rewriteValuePPC64_OpOffPtr_0(v *Value) bool { } } func rewriteValuePPC64_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64OR) v.AddArg(x) v.AddArg(y) @@ -4851,11 +5148,13 @@ func rewriteValuePPC64_OpOr16_0(v *Value) bool { } } func 
rewriteValuePPC64_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64OR) v.AddArg(x) v.AddArg(y) @@ -4863,11 +5162,13 @@ func rewriteValuePPC64_OpOr32_0(v *Value) bool { } } func rewriteValuePPC64_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or64 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64OR) v.AddArg(x) v.AddArg(y) @@ -4875,11 +5176,13 @@ func rewriteValuePPC64_OpOr64_0(v *Value) bool { } } func rewriteValuePPC64_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64OR) v.AddArg(x) v.AddArg(y) @@ -4887,11 +5190,13 @@ func rewriteValuePPC64_OpOr8_0(v *Value) bool { } } func rewriteValuePPC64_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64OR) v.AddArg(x) v.AddArg(y) @@ -4899,21 +5204,20 @@ func rewriteValuePPC64_OpOrB_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADD (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (ROTLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLDconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRDconst { continue } @@ -4932,15 +5236,12 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { // cond: d == 32-c // result: (ROTLWconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLWconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRWconst { continue } @@ -4958,9 +5259,7 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { // match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) // result: (ROTL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLD { continue } @@ -4971,7 +5270,6 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRD { continue } @@ -5002,9 +5300,7 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { // match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLW { continue } @@ -5015,7 +5311,6 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRW { continue } @@ -5047,10 +5342,8 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { // cond: is32Bit(c) // result: (ADDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpPPC64MOVDconst { continue } @@ -5068,12 +5361,12 @@ 
func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ADDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDconst [c] (ADDconst [d] x)) // cond: is32Bit(c+d) // result: (ADDconst [c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } @@ -5093,7 +5386,7 @@ func rewriteValuePPC64_OpPPC64ADDconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5103,7 +5396,6 @@ func rewriteValuePPC64_OpPPC64ADDconst_0(v *Value) bool { // result: (MOVDaddr [c+d] {sym} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDaddr { break } @@ -5119,13 +5411,13 @@ func rewriteValuePPC64_OpPPC64ADDconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AND x (NOR y y)) // result: (ANDN x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpPPC64NOR { continue } @@ -5143,14 +5435,11 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { // match: (AND (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c&d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64MOVDconst { continue } @@ -5165,10 +5454,8 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { // cond: isU16Bit(c) // result: (ANDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpPPC64MOVDconst { continue } @@ -5187,14 +5474,12 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { // cond: c&0xFFFFFFFF == 0xFFFFFFFF // result: y for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { continue } c := v_0.AuxInt - y := v.Args[1^_i0] + y := v_1 if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) { continue } @@ -5208,13 +5493,11 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) // result: (MOVWZreg x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 0xFFFFFFFF { continue } - y := v.Args[1^_i0] + y := v_1 if y.Op != OpPPC64MOVWreg { continue } @@ -5228,14 +5511,12 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { // match: (AND (MOVDconst [c]) x:(MOVBZload _ _)) // result: (ANDconst [c&0xFF] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { continue } c := v_0.AuxInt - x := v.Args[1^_i0] + x := v_1 if x.Op != OpPPC64MOVBZload { continue } @@ -5250,11 +5531,11 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDconst [c] (ANDconst [d] x)) // result: (ANDconst [c&d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64ANDconst { break } @@ -5271,7 +5552,7 @@ func 
rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5292,7 +5573,7 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { // result: y for { c := v.AuxInt - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { break } @@ -5307,7 +5588,7 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { if v.AuxInt != 0xFF { break } - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBreg { break } @@ -5321,7 +5602,7 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { // result: y for { c := v.AuxInt - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { break } @@ -5336,7 +5617,7 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { if v.AuxInt != 0xFFFF { break } - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHreg { break } @@ -5349,7 +5630,6 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { // result: (ANDconst [c&0xFF] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVBreg { break } @@ -5363,7 +5643,6 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { // result: (ANDconst [c&0xFF] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVBZreg { break } @@ -5377,7 +5656,6 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { // result: (ANDconst [c&0xFFFF] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVHreg { break } @@ -5390,11 +5668,11 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ANDconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (ANDconst [c] (MOVHZreg x)) // result: (ANDconst [c&0xFFFF] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVHZreg { break } @@ -5408,7 +5686,6 @@ func rewriteValuePPC64_OpPPC64ANDconst_10(v *Value) bool { // result: (ANDconst [c&0xFFFFFFFF] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVWreg { break } @@ -5422,7 +5699,6 @@ func rewriteValuePPC64_OpPPC64ANDconst_10(v *Value) bool { // result: (ANDconst [c&0xFFFFFFFF] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVWZreg { break } @@ -5435,14 +5711,14 @@ func rewriteValuePPC64_OpPPC64ANDconst_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMP_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMP x (MOVDconst [c])) // cond: is16Bit(c) // result: (CMPconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -5459,12 +5735,11 @@ func rewriteValuePPC64_OpPPC64CMP_0(v *Value) bool { // cond: is16Bit(c) // result: (InvertFlags (CMPconst y [c])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt + y := v_1 if !(is16Bit(c)) { break } @@ -5478,14 +5753,14 @@ func rewriteValuePPC64_OpPPC64CMP_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPU x (MOVDconst [c])) // cond: isU16Bit(c) // result: (CMPUconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -5502,12 +5777,11 @@ func rewriteValuePPC64_OpPPC64CMPU_0(v *Value) bool { // cond: isU16Bit(c) // result: (InvertFlags (CMPUconst y [c])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt + y := v_1 if !(isU16Bit(c)) { break } @@ -5521,12 
+5795,12 @@ func rewriteValuePPC64_OpPPC64CMPU_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPUconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPUconst (MOVDconst [x]) [y]) // cond: x==y // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5542,7 +5816,6 @@ func rewriteValuePPC64_OpPPC64CMPUconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5558,7 +5831,6 @@ func rewriteValuePPC64_OpPPC64CMPUconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5572,13 +5844,13 @@ func rewriteValuePPC64_OpPPC64CMPUconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVWreg y)) // result: (CMPW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVWreg { break } @@ -5591,12 +5863,11 @@ func rewriteValuePPC64_OpPPC64CMPW_0(v *Value) bool { // match: (CMPW (MOVWreg x) y) // result: (CMPW x y) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVWreg { break } x := v_0.Args[0] + y := v_1 v.reset(OpPPC64CMPW) v.AddArg(x) v.AddArg(y) @@ -5606,9 +5877,7 @@ func rewriteValuePPC64_OpPPC64CMPW_0(v *Value) bool { // cond: is16Bit(c) // result: (CMPWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -5625,12 +5894,11 @@ func rewriteValuePPC64_OpPPC64CMPW_0(v *Value) bool { // cond: is16Bit(c) // result: (InvertFlags (CMPWconst y [c])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt + y := v_1 if !(is16Bit(c)) { break } @@ -5644,13 +5912,13 @@ func rewriteValuePPC64_OpPPC64CMPW_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPWU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPWU x (MOVWZreg y)) // result: (CMPWU x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVWZreg { break } @@ -5663,12 +5931,11 @@ func rewriteValuePPC64_OpPPC64CMPWU_0(v *Value) bool { // match: (CMPWU (MOVWZreg x) y) // result: (CMPWU x y) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVWZreg { break } x := v_0.Args[0] + y := v_1 v.reset(OpPPC64CMPWU) v.AddArg(x) v.AddArg(y) @@ -5678,9 +5945,7 @@ func rewriteValuePPC64_OpPPC64CMPWU_0(v *Value) bool { // cond: isU16Bit(c) // result: (CMPWUconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -5697,12 +5962,11 @@ func rewriteValuePPC64_OpPPC64CMPWU_0(v *Value) bool { // cond: isU16Bit(c) // result: (InvertFlags (CMPWUconst y [c])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt + y := v_1 if !(isU16Bit(c)) { break } @@ -5716,12 +5980,12 @@ func rewriteValuePPC64_OpPPC64CMPWU_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPWUconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPWUconst (MOVDconst [x]) [y]) // cond: int32(x)==int32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5737,7 +6001,6 @@ func rewriteValuePPC64_OpPPC64CMPWUconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5753,7 +6016,6 @@ func 
rewriteValuePPC64_OpPPC64CMPWUconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5767,12 +6029,12 @@ func rewriteValuePPC64_OpPPC64CMPWUconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPWconst (MOVDconst [x]) [y]) // cond: int32(x)==int32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5788,7 +6050,6 @@ func rewriteValuePPC64_OpPPC64CMPWconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5804,7 +6065,6 @@ func rewriteValuePPC64_OpPPC64CMPWconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5818,12 +6078,12 @@ func rewriteValuePPC64_OpPPC64CMPWconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64CMPconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPconst (MOVDconst [x]) [y]) // cond: x==y // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5839,7 +6099,6 @@ func rewriteValuePPC64_OpPPC64CMPconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5855,7 +6114,6 @@ func rewriteValuePPC64_OpPPC64CMPconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -5869,12 +6127,12 @@ func rewriteValuePPC64_OpPPC64CMPconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Equal (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagEQ { break } @@ -5885,7 +6143,6 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { // match: (Equal (FlagLT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagLT { break } @@ -5896,7 +6153,6 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { // match: (Equal (FlagGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagGT { break } @@ -5907,7 +6163,6 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { // match: (Equal (InvertFlags x)) // result: (Equal x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64InvertFlags { break } @@ -5919,7 +6174,7 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { // match: (Equal cmp) // result: (ISELB [2] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -5930,10 +6185,10 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool { + v_0 := v.Args[0] // match: (FABS (FMOVDconst [x])) // result: (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { break } @@ -5945,18 +6200,18 @@ func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FADD (FMUL x y) z) // result: (FMADD x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64FMUL { continue } y := v_0.Args[1] x := v_0.Args[0] - z := v.Args[1^_i0] + z := v_1 v.reset(OpPPC64FMADD) 
v.AddArg(x) v.AddArg(y) @@ -5968,18 +6223,18 @@ func rewriteValuePPC64_OpPPC64FADD_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FADDS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FADDS (FMULS x y) z) // result: (FMADDS x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64FMULS { continue } y := v_0.Args[1] x := v_0.Args[0] - z := v.Args[1^_i0] + z := v_1 v.reset(OpPPC64FMADDS) v.AddArg(x) v.AddArg(y) @@ -5991,10 +6246,10 @@ func rewriteValuePPC64_OpPPC64FADDS_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool { + v_0 := v.Args[0] // match: (FCEIL (FMOVDconst [x])) // result: (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { break } @@ -6006,10 +6261,10 @@ func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool { + v_0 := v.Args[0] // match: (FFLOOR (FMOVDconst [x])) // result: (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { break } @@ -6021,12 +6276,13 @@ func rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FGreaterEqual_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (FGreaterEqual cmp) // result: (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISEL) v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -6044,12 +6300,13 @@ func rewriteValuePPC64_OpPPC64FGreaterEqual_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64FGreaterThan_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (FGreaterThan cmp) // result: (ISELB [1] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -6060,12 +6317,13 @@ func rewriteValuePPC64_OpPPC64FGreaterThan_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64FLessEqual_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (FLessEqual cmp) // result: (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISEL) v.AuxInt = 2 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -6083,12 +6341,13 @@ func rewriteValuePPC64_OpPPC64FLessEqual_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64FLessThan_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (FLessThan cmp) // result: (ISELB [0] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -6099,14 +6358,14 @@ func rewriteValuePPC64_OpPPC64FLessThan_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64FMOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) // result: (MTVSRD x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -6125,14 +6384,14 @@ func rewriteValuePPC64_OpPPC64FMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != 
OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -6149,13 +6408,12 @@ func rewriteValuePPC64_OpPPC64FMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -6169,18 +6427,20 @@ func rewriteValuePPC64_OpPPC64FMOVDload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) // result: (MOVDstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MTVSRD { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVDstore) v.AuxInt = off v.Aux = sym @@ -6195,14 +6455,13 @@ func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1 + off2)) { break } @@ -6220,15 +6479,15 @@ func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -6243,20 +6502,22 @@ func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FMOVSload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -6273,13 +6534,12 @@ func rewriteValuePPC64_OpPPC64FMOVSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -6293,20 +6553,22 @@ func rewriteValuePPC64_OpPPC64FMOVSload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FMOVSstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is16Bit(off1+off2) // result: (FMOVSstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1 + off2)) { break } @@ -6324,15 +6586,15 @@ func rewriteValuePPC64_OpPPC64FMOVSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && (ptr.Op != 
OpSB || p.Uses == 1)) { break } @@ -6347,10 +6609,10 @@ func rewriteValuePPC64_OpPPC64FMOVSstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FNEG_0(v *Value) bool { + v_0 := v.Args[0] // match: (FNEG (FABS x)) // result: (FNABS x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FABS { break } @@ -6362,7 +6624,6 @@ func rewriteValuePPC64_OpPPC64FNEG_0(v *Value) bool { // match: (FNEG (FNABS x)) // result: (FABS x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FNABS { break } @@ -6374,10 +6635,10 @@ func rewriteValuePPC64_OpPPC64FNEG_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool { + v_0 := v.Args[0] // match: (FSQRT (FMOVDconst [x])) // result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { break } @@ -6389,16 +6650,17 @@ func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FSUB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FSUB (FMUL x y) z) // result: (FMSUB x y z) for { - z := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64FMUL { break } y := v_0.Args[1] x := v_0.Args[0] + z := v_1 v.reset(OpPPC64FMSUB) v.AddArg(x) v.AddArg(y) @@ -6408,16 +6670,17 @@ func rewriteValuePPC64_OpPPC64FSUB_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FSUBS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FSUBS (FMULS x y) z) // result: (FMSUBS x y z) for { - z := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64FMULS { break } y := v_0.Args[1] x := v_0.Args[0] + z := v_1 v.reset(OpPPC64FMSUBS) v.AddArg(x) v.AddArg(y) @@ -6427,10 +6690,10 @@ func rewriteValuePPC64_OpPPC64FSUBS_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool { + v_0 := v.Args[0] // match: (FTRUNC (FMOVDconst [x])) // result: (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { break } @@ -6442,12 +6705,12 @@ func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqual (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagEQ { break } @@ -6458,7 +6721,6 @@ func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagLT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagLT { break } @@ -6469,7 +6731,6 @@ func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagGT { break } @@ -6480,7 +6741,6 @@ func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (InvertFlags x)) // result: (LessEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64InvertFlags { break } @@ -6492,7 +6752,7 @@ func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual cmp) // result: (ISELB [4] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 4 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -6503,12 +6763,12 @@ func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64GreaterThan_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (GreaterThan (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != 
OpPPC64FlagEQ { break } @@ -6519,7 +6779,6 @@ func rewriteValuePPC64_OpPPC64GreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagLT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagLT { break } @@ -6530,7 +6789,6 @@ func rewriteValuePPC64_OpPPC64GreaterThan_0(v *Value) bool { // match: (GreaterThan (FlagGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagGT { break } @@ -6541,7 +6799,6 @@ func rewriteValuePPC64_OpPPC64GreaterThan_0(v *Value) bool { // match: (GreaterThan (InvertFlags x)) // result: (LessThan x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64InvertFlags { break } @@ -6553,7 +6810,7 @@ func rewriteValuePPC64_OpPPC64GreaterThan_0(v *Value) bool { // match: (GreaterThan cmp) // result: (ISELB [1] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -6564,15 +6821,16 @@ func rewriteValuePPC64_OpPPC64GreaterThan_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ISEL [2] x _ (FlagEQ)) // result: x for { if v.AuxInt != 2 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagEQ { break } @@ -6587,9 +6845,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 2 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagLT { break } @@ -6604,9 +6860,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 2 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagGT { break } @@ -6621,9 +6875,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 6 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagEQ { break } @@ -6638,9 +6890,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 6 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagLT { break } @@ -6655,9 +6905,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 6 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagGT { break } @@ -6672,9 +6920,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 0 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagEQ { break } @@ -6689,9 +6935,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 0 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagGT { break } @@ -6706,9 +6950,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 0 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagLT { break } @@ -6723,9 +6965,7 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { if v.AuxInt != 5 { break } - _ = v.Args[2] - x := v.Args[1] - v_2 := v.Args[2] + x := v_1 if v_2.Op != OpPPC64FlagEQ { break } @@ -6737,15 +6977,16 @@ func rewriteValuePPC64_OpPPC64ISEL_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ISEL [5] _ x (FlagLT)) // result: x for { if v.AuxInt != 5 { break } - _ = v.Args[2] - x := v.Args[1] - v_2 := v.Args[2] + x := v_1 if v_2.Op != OpPPC64FlagLT { break } @@ -6760,9 +7001,7 @@ func 
rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 5 { break } - _ = v.Args[2] - y := v.Args[0] - v_2 := v.Args[2] + y := v_0 if v_2.Op != OpPPC64FlagGT { break } @@ -6777,9 +7016,7 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 1 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagEQ { break } @@ -6794,9 +7031,7 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 1 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagLT { break } @@ -6811,9 +7046,7 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 1 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagGT { break } @@ -6828,9 +7061,7 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 4 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagEQ { break } @@ -6845,9 +7076,7 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 4 { break } - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpPPC64FlagGT { break } @@ -6862,9 +7091,7 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { if v.AuxInt != 4 { break } - _ = v.Args[2] - y := v.Args[1] - v_2 := v.Args[2] + y := v_1 if v_2.Op != OpPPC64FlagLT { break } @@ -6878,10 +7105,8 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { // result: (ISEL [n+1] x y bool) for { n := v.AuxInt - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpPPC64InvertFlags { break } @@ -6901,10 +7126,8 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { // result: (ISEL [n-1] x y bool) for { n := v.AuxInt - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpPPC64InvertFlags { break } @@ -6922,15 +7145,16 @@ func rewriteValuePPC64_OpPPC64ISEL_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ISEL_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ISEL [n] x y (InvertFlags bool)) // cond: n%4 == 2 // result: (ISEL [n] x y bool) for { n := v.AuxInt - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpPPC64InvertFlags { break } @@ -6948,15 +7172,11 @@ func rewriteValuePPC64_OpPPC64ISEL_20(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { + v_1 := v.Args[1] // match: (ISELB [0] _ (FlagLT)) // result: (MOVDconst [1]) for { - if v.AuxInt != 0 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagLT { + if v.AuxInt != 0 || v_1.Op != OpPPC64FlagLT { break } v.reset(OpPPC64MOVDconst) @@ -6966,12 +7186,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [0] _ (FlagGT)) // result: (MOVDconst [0]) for { - if v.AuxInt != 0 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagGT { + if v.AuxInt != 0 || v_1.Op != OpPPC64FlagGT { break } v.reset(OpPPC64MOVDconst) @@ -6981,12 +7196,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [0] _ (FlagEQ)) // result: (MOVDconst [0]) for { - if v.AuxInt != 0 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagEQ { + if v.AuxInt != 0 || v_1.Op != OpPPC64FlagEQ { break } v.reset(OpPPC64MOVDconst) @@ -6996,12 +7206,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [1] _ 
(FlagGT)) // result: (MOVDconst [1]) for { - if v.AuxInt != 1 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagGT { + if v.AuxInt != 1 || v_1.Op != OpPPC64FlagGT { break } v.reset(OpPPC64MOVDconst) @@ -7011,12 +7216,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [1] _ (FlagLT)) // result: (MOVDconst [0]) for { - if v.AuxInt != 1 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagLT { + if v.AuxInt != 1 || v_1.Op != OpPPC64FlagLT { break } v.reset(OpPPC64MOVDconst) @@ -7026,12 +7226,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [1] _ (FlagEQ)) // result: (MOVDconst [0]) for { - if v.AuxInt != 1 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagEQ { + if v.AuxInt != 1 || v_1.Op != OpPPC64FlagEQ { break } v.reset(OpPPC64MOVDconst) @@ -7041,12 +7236,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [2] _ (FlagEQ)) // result: (MOVDconst [1]) for { - if v.AuxInt != 2 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagEQ { + if v.AuxInt != 2 || v_1.Op != OpPPC64FlagEQ { break } v.reset(OpPPC64MOVDconst) @@ -7056,12 +7246,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [2] _ (FlagLT)) // result: (MOVDconst [0]) for { - if v.AuxInt != 2 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagLT { + if v.AuxInt != 2 || v_1.Op != OpPPC64FlagLT { break } v.reset(OpPPC64MOVDconst) @@ -7071,12 +7256,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [2] _ (FlagGT)) // result: (MOVDconst [0]) for { - if v.AuxInt != 2 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagGT { + if v.AuxInt != 2 || v_1.Op != OpPPC64FlagGT { break } v.reset(OpPPC64MOVDconst) @@ -7086,12 +7266,7 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { // match: (ISELB [4] _ (FlagLT)) // result: (MOVDconst [0]) for { - if v.AuxInt != 4 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagLT { + if v.AuxInt != 4 || v_1.Op != OpPPC64FlagLT { break } v.reset(OpPPC64MOVDconst) @@ -7101,17 +7276,14 @@ func rewriteValuePPC64_OpPPC64ISELB_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ISELB [4] _ (FlagGT)) // result: (MOVDconst [1]) for { - if v.AuxInt != 4 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagGT { + if v.AuxInt != 4 || v_1.Op != OpPPC64FlagGT { break } v.reset(OpPPC64MOVDconst) @@ -7121,12 +7293,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [4] _ (FlagEQ)) // result: (MOVDconst [1]) for { - if v.AuxInt != 4 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagEQ { + if v.AuxInt != 4 || v_1.Op != OpPPC64FlagEQ { break } v.reset(OpPPC64MOVDconst) @@ -7136,12 +7303,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [5] _ (FlagGT)) // result: (MOVDconst [0]) for { - if v.AuxInt != 5 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagGT { + if v.AuxInt != 5 || v_1.Op != OpPPC64FlagGT { break } v.reset(OpPPC64MOVDconst) @@ -7151,12 +7313,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [5] _ (FlagLT)) // result: (MOVDconst [1]) for { - if v.AuxInt != 5 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagLT { + if 
v.AuxInt != 5 || v_1.Op != OpPPC64FlagLT { break } v.reset(OpPPC64MOVDconst) @@ -7166,12 +7323,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [5] _ (FlagEQ)) // result: (MOVDconst [1]) for { - if v.AuxInt != 5 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagEQ { + if v.AuxInt != 5 || v_1.Op != OpPPC64FlagEQ { break } v.reset(OpPPC64MOVDconst) @@ -7181,12 +7333,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [6] _ (FlagEQ)) // result: (MOVDconst [0]) for { - if v.AuxInt != 6 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagEQ { + if v.AuxInt != 6 || v_1.Op != OpPPC64FlagEQ { break } v.reset(OpPPC64MOVDconst) @@ -7196,12 +7343,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [6] _ (FlagLT)) // result: (MOVDconst [1]) for { - if v.AuxInt != 6 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagLT { + if v.AuxInt != 6 || v_1.Op != OpPPC64FlagLT { break } v.reset(OpPPC64MOVDconst) @@ -7211,12 +7353,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // match: (ISELB [6] _ (FlagGT)) // result: (MOVDconst [1]) for { - if v.AuxInt != 6 { - break - } - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpPPC64FlagGT { + if v.AuxInt != 6 || v_1.Op != OpPPC64FlagGT { break } v.reset(OpPPC64MOVDconst) @@ -7228,13 +7365,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // result: (ISELB [n+1] (MOVDconst [1]) bool) for { n := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpPPC64InvertFlags { + if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 || v_1.Op != OpPPC64InvertFlags { break } bool := v_1.Args[0] @@ -7254,13 +7385,7 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { // result: (ISELB [n-1] (MOVDconst [1]) bool) for { n := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpPPC64InvertFlags { + if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 || v_1.Op != OpPPC64InvertFlags { break } bool := v_1.Args[0] @@ -7278,6 +7403,8 @@ func rewriteValuePPC64_OpPPC64ISELB_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ISELB_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) @@ -7285,13 +7412,7 @@ func rewriteValuePPC64_OpPPC64ISELB_20(v *Value) bool { // result: (ISELB [n] (MOVDconst [1]) bool) for { n := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpPPC64InvertFlags { + if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 || v_1.Op != OpPPC64InvertFlags { break } bool := v_1.Args[0] @@ -7309,12 +7430,12 @@ func rewriteValuePPC64_OpPPC64ISELB_20(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64LessEqual_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (LessEqual (FlagEQ)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagEQ { break } @@ -7325,7 +7446,6 @@ func rewriteValuePPC64_OpPPC64LessEqual_0(v *Value) bool { // match: (LessEqual (FlagLT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagLT { break } @@ -7336,7 +7456,6 @@ func rewriteValuePPC64_OpPPC64LessEqual_0(v *Value) bool { // match: (LessEqual (FlagGT)) // 
result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagGT { break } @@ -7347,7 +7466,6 @@ func rewriteValuePPC64_OpPPC64LessEqual_0(v *Value) bool { // match: (LessEqual (InvertFlags x)) // result: (GreaterEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64InvertFlags { break } @@ -7359,7 +7477,7 @@ func rewriteValuePPC64_OpPPC64LessEqual_0(v *Value) bool { // match: (LessEqual cmp) // result: (ISELB [5] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 5 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -7370,12 +7488,12 @@ func rewriteValuePPC64_OpPPC64LessEqual_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (LessThan (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagEQ { break } @@ -7386,7 +7504,6 @@ func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { // match: (LessThan (FlagLT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagLT { break } @@ -7397,7 +7514,6 @@ func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { // match: (LessThan (FlagGT)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagGT { break } @@ -7408,7 +7524,6 @@ func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { // match: (LessThan (InvertFlags x)) // result: (GreaterThan x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64InvertFlags { break } @@ -7420,7 +7535,7 @@ func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { // match: (LessThan cmp) // result: (ISELB [0] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 0 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -7431,12 +7546,12 @@ func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64MFVSRD_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MFVSRD (FMOVDconst [c])) // result: (MOVDconst [c]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { break } @@ -7449,7 +7564,7 @@ func rewriteValuePPC64_OpPPC64MFVSRD_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVDload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64FMOVDload { break } @@ -7473,20 +7588,22 @@ func rewriteValuePPC64_OpPPC64MFVSRD_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -7503,13 +7620,12 @@ func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -7528,13 +7644,13 @@ func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] + mem := v_1 if !(sym == nil && p.Uses == 1) { 
break } @@ -7547,17 +7663,19 @@ func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBZloadidx ptr (MOVDconst [c]) mem) // cond: is16Bit(c) // result: (MOVBZload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c)) { break } @@ -7571,13 +7689,12 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVBZload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 if !(is16Bit(c)) { break } @@ -7590,13 +7707,14 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVBZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64ANDconst { break } @@ -7612,7 +7730,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // match: (MOVBZreg (SRWconst [c] (MOVBZreg x))) // result: (SRWconst [c] (MOVBZreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -7633,7 +7750,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // cond: sizeof(x.Type) == 8 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -7651,7 +7767,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // cond: c>=56 // result: (SRDconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -7669,7 +7784,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // cond: c>=24 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -7686,7 +7800,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // match: (MOVBZreg y:(MOVBZreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBZreg { break } @@ -7698,7 +7812,6 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // match: (MOVBZreg (MOVBreg x)) // result: (MOVBZreg x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVBreg { break } @@ -7710,7 +7823,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // match: (MOVBZreg x:(MOVBZload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVBZload { break } @@ -7723,7 +7836,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVBZloadidx { break } @@ -7737,7 +7850,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { // cond: is8BitInt(t) && !isSigned(t) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -7753,10 +7866,10 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBZreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBZreg (MOVDconst [c])) // result: (MOVDconst [int64(uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -7768,13 +7881,14 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVBreg 
y:(ANDconst [c] _)) // cond: uint64(c) <= 0x7F // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64ANDconst { break } @@ -7790,7 +7904,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // match: (MOVBreg (SRAWconst [c] (MOVBreg x))) // result: (SRAWconst [c] (MOVBreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -7811,7 +7924,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // cond: sizeof(x.Type) == 8 // result: (SRAWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -7829,7 +7941,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // cond: c>56 // result: (SRDconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -7847,7 +7958,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // cond: c==56 // result: (SRADconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -7865,7 +7975,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // cond: c>24 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -7883,7 +7992,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // cond: c==24 // result: (SRAWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -7900,7 +8008,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // match: (MOVBreg y:(MOVBreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBreg { break } @@ -7912,7 +8020,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // match: (MOVBreg (MOVBZreg x)) // result: (MOVBreg x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVBZreg { break } @@ -7925,7 +8032,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { // cond: is8BitInt(t) && isSigned(t) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -7941,10 +8048,10 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVBreg (MOVDconst [c])) // result: (MOVDconst [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -7956,20 +8063,22 @@ func rewriteValuePPC64_OpPPC64MOVBreg_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) // cond: is16Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} x val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1 + off2)) { break } @@ -7987,15 +8096,15 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -8012,12 +8121,11 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpPPC64MOVBstorezero) v.AuxInt = off v.Aux = sym @@ -8031,14 +8139,14 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v 
*Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil && p.Uses == 1) { break } @@ -8054,13 +8162,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVBreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym @@ -8074,13 +8181,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVBZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym @@ -8094,13 +8200,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym @@ -8114,13 +8219,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVHZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym @@ -8134,13 +8238,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym @@ -8154,13 +8257,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVBstore) v.AuxInt = off v.Aux = sym @@ -8172,6 +8274,9 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -8181,9 +8286,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64SRWconst { break } @@ -8193,6 +8296,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { break } x := v_1_0.Args[0] + mem := v_2 if !(c <= 8) { break } @@ -8213,9 +8317,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64SRWconst { break } @@ -8225,6 +8327,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { break } x := v_1_0.Args[0] + mem := v_2 if !(c <= 8) { break } @@ -8245,9 +8348,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64SRWconst { break } @@ -8257,6 +8358,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { break } x := v_1_0.Args[0] + mem := v_2 if !(c <= 24) { break } @@ -8277,9 +8379,7 @@ func 
rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64SRWconst { break } @@ -8289,6 +8389,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { break } x := v_1_0.Args[0] + mem := v_2 if !(c <= 24) { break } @@ -8309,14 +8410,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRWconst || v_1.AuxInt != 24 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8349,14 +8448,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 24 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8389,14 +8486,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRWconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8422,14 +8517,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8455,10 +8548,9 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { i3 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8522,10 +8614,9 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8554,6 +8645,9 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -8563,14 +8657,12 @@ func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { for { i7 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 56 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8644,10 +8736,9 @@ func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { for { i7 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] + p := v_0 + w := v_1 + x0 := v_2 if x0.Op != OpPPC64MOVBstore { break } @@ -8772,20 +8863,23 @@ func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) // cond: is16Bit(c) // result: (MOVBstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + 
val := v_2 + mem := v_3 if !(is16Bit(c)) { break } @@ -8800,14 +8894,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVBstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 if !(is16Bit(c)) { break } @@ -8823,14 +8916,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVBreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym @@ -8845,14 +8937,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVBZreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym @@ -8867,14 +8958,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVHreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym @@ -8889,14 +8979,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVHZreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym @@ -8911,14 +9000,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym @@ -8933,14 +9021,13 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVWZreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVBstoreidx) v.AuxInt = off v.Aux = sym @@ -8956,10 +9043,8 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64SRWconst { break } @@ -8969,6 +9054,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { break } x := v_2_0.Args[0] + mem := v_3 if !(c <= 8) { break } @@ -8990,10 +9076,8 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64SRWconst { break } @@ -9003,6 +9087,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { break } x := v_2_0.Args[0] + mem := v_3 if !(c <= 8) { break } @@ -9021,6 +9106,10 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block 
typ := &b.Func.Config.Types // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWreg x) [c]) mem) @@ -9029,10 +9118,8 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64SRWconst { break } @@ -9042,6 +9129,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { break } x := v_2_0.Args[0] + mem := v_3 if !(c <= 24) { break } @@ -9063,10 +9151,8 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64SRWconst { break } @@ -9076,6 +9162,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { break } x := v_2_0.Args[0] + mem := v_3 if !(c <= 24) { break } @@ -9094,19 +9181,20 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) // cond: is16Bit(off1+off2) // result: (MOVBstorezero [off1+off2] {sym} x mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -9123,14 +9211,14 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux x := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } @@ -9144,14 +9232,14 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) // result: (MFVSRD x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -9170,14 +9258,14 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { break } @@ -9194,13 +9282,12 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) { break } @@ -9219,13 +9306,13 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] + mem := v_1 if !(sym == nil && p.Uses == 1) { break } @@ -9238,17 +9325,19 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDloadidx ptr (MOVDconst [c]) mem) // cond: is16Bit(c) && c%4 == 0 // result: (MOVDload [c] ptr mem) for { - 
mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c) && c%4 == 0) { break } @@ -9262,13 +9351,12 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool { // cond: is16Bit(c) && c%4 == 0 // result: (MOVDload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 if !(is16Bit(c) && c%4 == 0) { break } @@ -9281,18 +9369,20 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem) // result: (FMOVDstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MFVSRD { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64FMOVDstore) v.AuxInt = off v.Aux = sym @@ -9307,14 +9397,13 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) { break } @@ -9332,15 +9421,15 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { break } @@ -9357,12 +9446,11 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpPPC64MOVDstorezero) v.AuxInt = off v.Aux = sym @@ -9376,14 +9464,14 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil && p.Uses == 1) { break } @@ -9397,18 +9485,21 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) // cond: is16Bit(c) && c%4 == 0 // result: (MOVDstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 if !(is16Bit(c) && c%4 == 0) { break } @@ -9423,14 +9514,13 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool { // cond: is16Bit(c) && c%4 == 0 // result: (MOVDstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 if !(is16Bit(c) && c%4 == 0) { break } @@ -9444,19 +9534,20 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0 // result: (MOVDstorezero [off1+off2] {sym} x mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) { break } @@ -9473,14 +9564,14 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux x := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { break } @@ -9494,17 +9585,19 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHBRstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHBRstore {sym} ptr (MOVHreg x) mem) // result: (MOVHBRstore {sym} ptr x mem) for { sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym v.AddArg(ptr) @@ -9516,13 +9609,12 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore_0(v *Value) bool { // result: (MOVHBRstore {sym} ptr x mem) for { sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVHZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym v.AddArg(ptr) @@ -9534,13 +9626,12 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore_0(v *Value) bool { // result: (MOVHBRstore {sym} ptr x mem) for { sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym v.AddArg(ptr) @@ -9552,13 +9643,12 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore_0(v *Value) bool { // result: (MOVHBRstore {sym} ptr x mem) for { sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHBRstore) v.Aux = sym v.AddArg(ptr) @@ -9569,20 +9659,22 @@ func rewriteValuePPC64_OpPPC64MOVHBRstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -9599,13 +9691,12 @@ func rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -9624,13 +9715,13 @@ func rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] + mem := v_1 if !(sym == nil && p.Uses == 1) { break } @@ -9643,17 +9734,19 @@ func 
rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHZloadidx ptr (MOVDconst [c]) mem) // cond: is16Bit(c) // result: (MOVHZload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c)) { break } @@ -9667,13 +9760,12 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVHZload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 if !(is16Bit(c)) { break } @@ -9686,13 +9778,14 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVHZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFFFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64ANDconst { break } @@ -9708,7 +9801,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // match: (MOVHZreg (SRWconst [c] (MOVBZreg x))) // result: (SRWconst [c] (MOVBZreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -9728,7 +9820,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // match: (MOVHZreg (SRWconst [c] (MOVHZreg x))) // result: (SRWconst [c] (MOVHZreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -9749,7 +9840,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // cond: sizeof(x.Type) <= 16 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -9767,7 +9857,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // cond: c>=48 // result: (SRDconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -9785,7 +9874,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // cond: c>=16 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -9802,7 +9890,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // match: (MOVHZreg y:(MOVHZreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHZreg { break } @@ -9814,7 +9902,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // match: (MOVHZreg y:(MOVBZreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBZreg { break } @@ -9826,7 +9914,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // match: (MOVHZreg y:(MOVHBRload _ _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHBRload { break } @@ -9839,7 +9927,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { // match: (MOVHZreg y:(MOVHreg x)) // result: (MOVHZreg x) for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHreg { break } @@ -9851,10 +9939,11 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHZreg x:(MOVBZload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVBZload { break } @@ -9867,7 +9956,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVBZloadidx { break } @@ -9880,7 +9969,7 @@ func 
rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { // match: (MOVHZreg x:(MOVHZload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHZload { break } @@ -9893,7 +9982,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHZloadidx { break } @@ -9907,7 +9996,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -9923,7 +10012,6 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { // match: (MOVHZreg (MOVDconst [c])) // result: (MOVDconst [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -9935,20 +10023,22 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -9965,13 +10055,12 @@ func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -9990,13 +10079,13 @@ func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] + mem := v_1 if !(sym == nil && p.Uses == 1) { break } @@ -10009,17 +10098,19 @@ func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHloadidx ptr (MOVDconst [c]) mem) // cond: is16Bit(c) // result: (MOVHload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c)) { break } @@ -10033,13 +10124,12 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVHload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 if !(is16Bit(c)) { break } @@ -10052,13 +10142,14 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVHreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0x7FFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64ANDconst { break } @@ -10074,7 +10165,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // match: (MOVHreg (SRAWconst [c] (MOVBreg x))) // result: (SRAWconst [c] (MOVBreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -10094,7 +10184,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // match: (MOVHreg (SRAWconst 
[c] (MOVHreg x))) // result: (SRAWconst [c] (MOVHreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -10115,7 +10204,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // cond: sizeof(x.Type) <= 16 // result: (SRAWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -10133,7 +10221,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // cond: c>48 // result: (SRDconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -10151,7 +10238,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // cond: c==48 // result: (SRADconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -10169,7 +10255,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // cond: c>16 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -10187,7 +10272,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // cond: c==16 // result: (SRAWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -10204,7 +10288,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // match: (MOVHreg y:(MOVHreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHreg { break } @@ -10216,7 +10300,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { // match: (MOVHreg y:(MOVBreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBreg { break } @@ -10228,10 +10312,11 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHreg y:(MOVHZreg x)) // result: (MOVHreg x) for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHZreg { break } @@ -10243,7 +10328,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { // match: (MOVHreg x:(MOVHload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHload { break } @@ -10256,7 +10341,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { // match: (MOVHreg x:(MOVHloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHloadidx { break } @@ -10270,7 +10355,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -10286,7 +10371,6 @@ func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { // match: (MOVHreg (MOVDconst [c])) // result: (MOVDconst [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -10298,6 +10382,9 @@ func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) @@ -10306,14 +10393,13 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1 + off2)) { break } @@ -10331,15 +10417,15 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := 
v_2 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -10356,12 +10442,11 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 { break } + mem := v_2 v.reset(OpPPC64MOVHstorezero) v.AuxInt = off v.Aux = sym @@ -10375,14 +10460,14 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil && p.Uses == 1) { break } @@ -10398,13 +10483,12 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym @@ -10418,13 +10502,12 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVHZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym @@ -10438,13 +10521,12 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym @@ -10458,13 +10540,12 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVHstore) v.AuxInt = off v.Aux = sym @@ -10479,14 +10560,12 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRWconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVHstore { break } @@ -10512,14 +10591,12 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { for { i1 := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x0 := v.Args[2] + x0 := v_2 if x0.Op != OpPPC64MOVHstore { break } @@ -10542,18 +10619,21 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) // cond: is16Bit(c) // result: (MOVHstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 if !(is16Bit(c)) { break } @@ -10568,14 +10648,13 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVHstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 if !(is16Bit(c)) { break } @@ -10591,14 +10670,13 
@@ func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVHreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym @@ -10613,14 +10691,13 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVHZreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym @@ -10635,14 +10712,13 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym @@ -10657,14 +10733,13 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVWZreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVHstoreidx) v.AuxInt = off v.Aux = sym @@ -10677,19 +10752,20 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) // cond: is16Bit(off1+off2) // result: (MOVHstorezero [off1+off2] {sym} x mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -10706,14 +10782,14 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux x := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } @@ -10727,17 +10803,19 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWBRstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWBRstore {sym} ptr (MOVWreg x) mem) // result: (MOVWBRstore {sym} ptr x mem) for { sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVWBRstore) v.Aux = sym v.AddArg(ptr) @@ -10749,13 +10827,12 @@ func rewriteValuePPC64_OpPPC64MOVWBRstore_0(v *Value) bool { // result: (MOVWBRstore {sym} ptr x mem) for { sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVWBRstore) v.Aux = sym v.AddArg(ptr) @@ -10766,20 +10843,22 @@ func rewriteValuePPC64_OpPPC64MOVWBRstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt 
sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -10796,13 +10875,12 @@ func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -10821,13 +10899,13 @@ func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] + mem := v_1 if !(sym == nil && p.Uses == 1) { break } @@ -10840,17 +10918,19 @@ func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWZloadidx ptr (MOVDconst [c]) mem) // cond: is16Bit(c) // result: (MOVWZload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c)) { break } @@ -10864,13 +10944,12 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVWZload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 if !(is16Bit(c)) { break } @@ -10883,13 +10962,14 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVWZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFFFFFFFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64ANDconst { break } @@ -10906,13 +10986,14 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // cond: uint64(c) <= 0xFFFFFFFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64AND { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y_0 := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { if y_0.Op != OpPPC64MOVDconst { continue } @@ -10930,7 +11011,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // match: (MOVWZreg (SRWconst [c] (MOVBZreg x))) // result: (SRWconst [c] (MOVBZreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -10950,7 +11030,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // match: (MOVWZreg (SRWconst [c] (MOVHZreg x))) // result: (SRWconst [c] (MOVHZreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -10970,7 +11049,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // match: (MOVWZreg (SRWconst [c] (MOVWZreg x))) // result: (SRWconst [c] (MOVWZreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -10991,7 +11069,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // cond: sizeof(x.Type) <= 32 // result: (SRWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRWconst { break } @@ -11009,7 +11086,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // cond: c>=32 // result: (SRDconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -11026,7 +11102,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v 
*Value) bool { // match: (MOVWZreg y:(MOVWZreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVWZreg { break } @@ -11038,7 +11114,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // match: (MOVWZreg y:(MOVHZreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHZreg { break } @@ -11050,7 +11126,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { // match: (MOVWZreg y:(MOVBZreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBZreg { break } @@ -11062,10 +11138,11 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWZreg y:(MOVHBRload _ _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHBRload { break } @@ -11078,7 +11155,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg y:(MOVWBRload _ _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVWBRload { break } @@ -11091,7 +11168,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg y:(MOVWreg x)) // result: (MOVWZreg x) for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVWreg { break } @@ -11103,7 +11180,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg x:(MOVBZload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVBZload { break } @@ -11116,7 +11193,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVBZloadidx { break } @@ -11129,7 +11206,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg x:(MOVHZload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHZload { break } @@ -11142,7 +11219,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHZloadidx { break } @@ -11155,7 +11232,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg x:(MOVWZload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVWZload { break } @@ -11168,7 +11245,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVWZloadidx { break } @@ -11182,7 +11259,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -11198,10 +11275,10 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWZreg (MOVDconst [c])) // result: (MOVDconst [int64(uint32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -11213,20 +11290,22 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := 
v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { break } @@ -11243,13 +11322,12 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) { break } @@ -11268,13 +11346,13 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { break } sym := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] + mem := v_1 if !(sym == nil && p.Uses == 1) { break } @@ -11287,17 +11365,19 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx ptr (MOVDconst [c]) mem) // cond: is16Bit(c) && c%4 == 0 // result: (MOVWload [c] ptr mem) for { - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c) && c%4 == 0) { break } @@ -11311,13 +11391,12 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool { // cond: is16Bit(c) && c%4 == 0 // result: (MOVWload [c] ptr mem) for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] + ptr := v_1 + mem := v_2 if !(is16Bit(c) && c%4 == 0) { break } @@ -11330,13 +11409,14 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVWreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFFFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64ANDconst { break } @@ -11353,13 +11433,14 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // cond: uint64(c) <= 0x7FFFFFFF // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64AND { break } _ = y.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y_0 := y.Args[_i0] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { if y_0.Op != OpPPC64MOVDconst { continue } @@ -11377,7 +11458,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // match: (MOVWreg (SRAWconst [c] (MOVBreg x))) // result: (SRAWconst [c] (MOVBreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -11397,7 +11477,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // match: (MOVWreg (SRAWconst [c] (MOVHreg x))) // result: (SRAWconst [c] (MOVHreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -11417,7 +11496,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // match: (MOVWreg (SRAWconst [c] (MOVWreg x))) // result: (SRAWconst [c] (MOVWreg x)) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -11438,7 +11516,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // cond: sizeof(x.Type) <= 32 // result: (SRAWconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRAWconst { break } @@ -11456,7 +11533,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // cond: c>32 // result: (SRDconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -11474,7 +11550,6 @@ 
func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // cond: c==32 // result: (SRADconst [c] x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64SRDconst { break } @@ -11491,7 +11566,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // match: (MOVWreg y:(MOVWreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVWreg { break } @@ -11503,7 +11578,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { // match: (MOVWreg y:(MOVHreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVHreg { break } @@ -11515,10 +11590,11 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg y:(MOVBreg _)) // result: y for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVBreg { break } @@ -11530,7 +11606,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg y:(MOVWZreg x)) // result: (MOVWreg x) for { - y := v.Args[0] + y := v_0 if y.Op != OpPPC64MOVWZreg { break } @@ -11542,7 +11618,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVHload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHload { break } @@ -11555,7 +11631,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVHloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVHloadidx { break } @@ -11568,7 +11644,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVWload _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVWload { break } @@ -11581,7 +11657,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg x:(MOVWloadidx _ _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVWloadidx { break } @@ -11595,7 +11671,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -11611,7 +11687,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg (MOVDconst [c])) // result: (MOVDconst [int64(int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -11623,20 +11698,22 @@ func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) // cond: is16Bit(off1+off2) // result: (MOVWstore [off1+off2] {sym} x val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is16Bit(off1 + off2)) { break } @@ -11654,15 +11731,15 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } @@ -11679,12 +11756,11 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 { break 
} + mem := v_2 v.reset(OpPPC64MOVWstorezero) v.AuxInt = off v.Aux = sym @@ -11698,14 +11774,14 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64ADD { break } idx := p.Args[1] ptr := p.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(off == 0 && sym == nil && p.Uses == 1) { break } @@ -11721,13 +11797,12 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVWstore) v.AuxInt = off v.Aux = sym @@ -11741,13 +11816,12 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVWZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpPPC64MOVWstore) v.AuxInt = off v.Aux = sym @@ -11759,18 +11833,21 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) // cond: is16Bit(c) // result: (MOVWstore [c] ptr val mem) for { - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpPPC64MOVDconst { break } c := v_1.AuxInt - val := v.Args[2] + val := v_2 + mem := v_3 if !(is16Bit(c)) { break } @@ -11785,14 +11862,13 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool { // cond: is16Bit(c) // result: (MOVWstore [c] ptr val mem) for { - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - val := v.Args[2] + ptr := v_1 + val := v_2 + mem := v_3 if !(is16Bit(c)) { break } @@ -11808,14 +11884,13 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVWreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVWstoreidx) v.AuxInt = off v.Aux = sym @@ -11830,14 +11905,13 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] + ptr := v_0 + idx := v_1 if v_2.Op != OpPPC64MOVWZreg { break } x := v_2.Args[0] + mem := v_3 v.reset(OpPPC64MOVWstoreidx) v.AuxInt = off v.Aux = sym @@ -11850,19 +11924,20 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) // cond: is16Bit(off1+off2) // result: (MOVWstorezero [off1+off2] {sym} x mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] + mem := v_1 if !(is16Bit(off1 + off2)) { break } @@ -11879,14 +11954,14 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - p := v.Args[0] + p := v_0 if p.Op != OpPPC64MOVDaddr { break } off2 := p.AuxInt sym2 := p.Aux x := p.Args[0] + mem := v_1 if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } @@ -11900,12 +11975,12 @@ func 
rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MTVSRD_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MTVSRD (MOVDconst [c])) // result: (FMOVDconst [c]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { break } @@ -11918,7 +11993,7 @@ func rewriteValuePPC64_OpPPC64MTVSRD_0(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: @x.Block (FMOVDload [off] {sym} ptr mem) for { - x := v.Args[0] + x := v_0 if x.Op != OpPPC64MOVDload { break } @@ -11942,11 +12017,11 @@ func rewriteValuePPC64_OpPPC64MTVSRD_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MaskIfNotCarry_0(v *Value) bool { + v_0 := v.Args[0] // match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) // cond: c < 0 && d > 0 && c + d < 0 // result: (MOVDconst [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconstForCarry { break } @@ -11966,12 +12041,12 @@ func rewriteValuePPC64_OpPPC64MaskIfNotCarry_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NotEqual (FlagEQ)) // result: (MOVDconst [0]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagEQ { break } @@ -11982,7 +12057,6 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { // match: (NotEqual (FlagLT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagLT { break } @@ -11993,7 +12067,6 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { // match: (NotEqual (FlagGT)) // result: (MOVDconst [1]) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64FlagGT { break } @@ -12004,7 +12077,6 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { // match: (NotEqual (InvertFlags x)) // result: (NotEqual x) for { - v_0 := v.Args[0] if v_0.Op != OpPPC64InvertFlags { break } @@ -12016,7 +12088,7 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { // match: (NotEqual cmp) // result: (ISELB [6] (MOVDconst [1]) cmp) for { - cmp := v.Args[0] + cmp := v_0 v.reset(OpPPC64ISELB) v.AuxInt = 6 v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) @@ -12027,6 +12099,8 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -12034,15 +12108,12 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // cond: d == 64-c // result: (ROTLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLDconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRDconst { continue } @@ -12061,15 +12132,12 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // cond: d == 32-c // result: (ROTLWconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLWconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRWconst { continue } @@ -12087,9 +12155,7 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) // result: (ROTL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op 
!= OpPPC64SLD { continue } @@ -12100,7 +12166,6 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRD { continue } @@ -12131,9 +12196,7 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLW { continue } @@ -12144,7 +12207,6 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRW { continue } @@ -12175,14 +12237,11 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // match: (OR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c|d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64MOVDconst { continue } @@ -12197,10 +12256,8 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // cond: isU32Bit(c) // result: (ORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpPPC64MOVDconst { continue } @@ -12220,9 +12277,8 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // result: @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpPPC64MOVBZload { continue } @@ -12230,7 +12286,7 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o1 := v.Args[1^_i0] + o1 := v_1 if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { continue } @@ -12263,9 +12319,8 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // result: @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpPPC64MOVBZload { continue } @@ -12273,7 +12328,7 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o1 := v.Args[1^_i0] + o1 := v_1 if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { continue } @@ -12306,9 +12361,8 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // result: @mergePoint(b,x0,x1) (MOVHBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpPPC64MOVBZload { continue } @@ -12316,7 +12370,7 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o1 := v.Args[1^_i0] + o1 := v_1 if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 { continue } @@ -12351,9 +12405,8 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { // result: @mergePoint(b,x0,x1) (MOVHBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpPPC64MOVBZload { continue } @@ -12361,7 +12414,7 @@ func rewriteValuePPC64_OpPPC64OR_0(v 
*Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o1 := v.Args[1^_i0] + o1 := v_1 if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 { continue } @@ -12394,6 +12447,8 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -12402,9 +12457,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1) (SLDconst (MOVHBRload (MOVDaddr [i0] {s} p) mem) [n1]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpPPC64SLWconst { continue } @@ -12417,7 +12471,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - s1 := v.Args[1^_i0] + s1 := v_1 if s1.Op != OpPPC64SLWconst { continue } @@ -12456,9 +12510,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1) (SLDconst (MOVHBRload (MOVDaddr [i0] {s} p) mem) [n1]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpPPC64SLDconst { continue } @@ -12471,7 +12524,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - s1 := v.Args[1^_i0] + s1 := v_1 if s1.Op != OpPPC64SLDconst { continue } @@ -12510,9 +12563,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { continue } @@ -12524,13 +12576,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x2.Aux mem := x2.Args[1] p := x2.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s0 := o0_0 if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { continue } @@ -12546,7 +12600,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - x0 := o0.Args[1^_i1] + x0 := o0_1 if x0.Op != OpPPC64MOVHZload { continue } @@ -12576,9 +12630,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { continue } @@ -12590,13 +12643,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x2.Aux mem := x2.Args[1] p := x2.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s0 := o0_0 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { continue } @@ -12612,7 +12667,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - x0 := o0.Args[1^_i1] + x0 := o0_1 if x0.Op != 
OpPPC64MOVHZload { continue } @@ -12642,9 +12697,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 { continue } @@ -12656,13 +12710,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x2.Aux mem := x2.Args[1] p := x2.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s0 := o0_0 if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 { continue } @@ -12678,7 +12734,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - x0 := o0.Args[1^_i1] + x0 := o0_1 if x0.Op != OpPPC64MOVHBRload || x0.Type != t { continue } @@ -12711,9 +12767,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 { continue } @@ -12725,13 +12780,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x2.Aux mem := x2.Args[1] p := x2.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s0 := o0_0 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 { continue } @@ -12747,7 +12804,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - x0 := o0.Args[1^_i1] + x0 := o0_1 if x0.Op != OpPPC64MOVHBRload || x0.Type != t { continue } @@ -12780,9 +12837,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpPPC64MOVBZload { continue } @@ -12790,13 +12846,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s0 := o0_0 if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 { continue } @@ -12812,7 +12870,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - s1 := o0.Args[1^_i1] + s1 := o0_1 if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 { continue } @@ -12849,9 +12907,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (MOVWBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpPPC64MOVBZload { continue } @@ -12859,13 +12916,15 
@@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s0 := o0_0 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 { continue } @@ -12881,7 +12940,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - s1 := o0.Args[1^_i1] + s1 := o0_1 if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 { continue } @@ -12918,9 +12977,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s2 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s2 := v_0 if s2.Op != OpPPC64SLDconst || s2.AuxInt != 32 { continue } @@ -12932,13 +12990,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x2.Aux mem := x2.Args[1] p := x2.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s1 := o0_0 if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 { continue } @@ -12954,7 +13014,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - s0 := o0.Args[1^_i1] + s0 := o0_1 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 { continue } @@ -12994,9 +13054,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { // result: @mergePoint(b,x0,x1,x2) (SLDconst (MOVWBRload (MOVDaddr [i0] {s} p) mem) [32]) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s2 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s2 := v_0 if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 { continue } @@ -13008,13 +13067,15 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { s := x2.Aux mem := x2.Args[1] p := x2.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s1 := o0_0 if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { continue } @@ -13030,7 +13091,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - s0 := o0.Args[1^_i1] + s0 := o0_1 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { continue } @@ -13068,6 +13129,8 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -13076,9 +13139,8 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s6 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s6 := v_0 if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 { continue } @@ -13090,13 +13152,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { s := x7.Aux mem := x7.Args[1] p := x7.Args[0] - o5 := 
v.Args[1^_i0] + o5 := v_1 if o5.Op != OpPPC64OR || o5.Type != t { continue } _ = o5.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s5 := o5.Args[_i1] + o5_0 := o5.Args[0] + o5_1 := o5.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 { + s5 := o5_0 if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 { continue } @@ -13112,13 +13176,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x6.Args[0] || mem != x6.Args[1] { continue } - o4 := o5.Args[1^_i1] + o4 := o5_1 if o4.Op != OpPPC64OR || o4.Type != t { continue } _ = o4.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s4 := o4.Args[_i2] + o4_0 := o4.Args[0] + o4_1 := o4.Args[1] + for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 { + s4 := o4_0 if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 { continue } @@ -13134,13 +13200,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x5.Args[0] || mem != x5.Args[1] { continue } - o3 := o4.Args[1^_i2] + o3 := o4_1 if o3.Op != OpPPC64OR || o3.Type != t { continue } _ = o3.Args[1] - for _i3 := 0; _i3 <= 1; _i3++ { - s3 := o3.Args[_i3] + o3_0 := o3.Args[0] + o3_1 := o3.Args[1] + for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 { + s3 := o3_0 if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { continue } @@ -13156,7 +13224,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x4.Args[0] || mem != x4.Args[1] { continue } - x0 := o3.Args[1^_i3] + x0 := o3_1 if x0.Op != OpPPC64MOVWZload { continue } @@ -13188,9 +13256,8 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 { continue } @@ -13202,13 +13269,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - o0 := v.Args[1^_i0] + o0 := v_1 if o0.Op != OpPPC64OR || o0.Type != t { continue } _ = o0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := o0.Args[_i1] + o0_0 := o0.Args[0] + o0_1 := o0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 { + s1 := o0_0 if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 { continue } @@ -13224,13 +13293,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - o1 := o0.Args[1^_i1] + o1 := o0_1 if o1.Op != OpPPC64OR || o1.Type != t { continue } _ = o1.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s2 := o1.Args[_i2] + o1_0 := o1.Args[0] + o1_1 := o1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, o1_0, o1_1 = _i2+1, o1_1, o1_0 { + s2 := o1_0 if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 { continue } @@ -13246,13 +13317,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x2.Args[0] || mem != x2.Args[1] { continue } - o2 := o1.Args[1^_i2] + o2 := o1_1 if o2.Op != OpPPC64OR || o2.Type != t { continue } _ = o2.Args[1] - for _i3 := 0; _i3 <= 1; _i3++ { - s3 := o2.Args[_i3] + o2_0 := o2.Args[0] + o2_1 := o2.Args[1] + for _i3 := 0; _i3 <= 1; _i3, o2_0, o2_1 = _i3+1, o2_1, o2_0 { + s3 := o2_0 if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 { continue } @@ -13268,7 +13341,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x3.Args[0] || mem != x3.Args[1] { continue } - x4 := o2.Args[1^_i3] + x4 := o2_1 if x4.Op != OpPPC64MOVWBRload || x4.Type != t { continue } @@ -13303,9 +13376,8 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { // result: 
@mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x7 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x7 := v_0 if x7.Op != OpPPC64MOVBZload { continue } @@ -13313,13 +13385,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { s := x7.Aux mem := x7.Args[1] p := x7.Args[0] - o5 := v.Args[1^_i0] + o5 := v_1 if o5.Op != OpPPC64OR || o5.Type != t { continue } _ = o5.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s6 := o5.Args[_i1] + o5_0 := o5.Args[0] + o5_1 := o5.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 { + s6 := o5_0 if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { continue } @@ -13335,13 +13409,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x6.Args[0] || mem != x6.Args[1] { continue } - o4 := o5.Args[1^_i1] + o4 := o5_1 if o4.Op != OpPPC64OR || o4.Type != t { continue } _ = o4.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s5 := o4.Args[_i2] + o4_0 := o4.Args[0] + o4_1 := o4.Args[1] + for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 { + s5 := o4_0 if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { continue } @@ -13357,13 +13433,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x5.Args[0] || mem != x5.Args[1] { continue } - o3 := o4.Args[1^_i2] + o3 := o4_1 if o3.Op != OpPPC64OR || o3.Type != t { continue } _ = o3.Args[1] - for _i3 := 0; _i3 <= 1; _i3++ { - s4 := o3.Args[_i3] + o3_0 := o3.Args[0] + o3_1 := o3.Args[1] + for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 { + s4 := o3_0 if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { continue } @@ -13379,7 +13457,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x4.Args[0] || mem != x4.Args[1] { continue } - s0 := o3.Args[1^_i3] + s0 := o3_1 if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 { continue } @@ -13418,9 +13496,8 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload (MOVDaddr [i0] {s} p) mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x7 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x7 := v_0 if x7.Op != OpPPC64MOVBZload { continue } @@ -13428,13 +13505,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { s := x7.Aux mem := x7.Args[1] p := x7.Args[0] - o5 := v.Args[1^_i0] + o5 := v_1 if o5.Op != OpPPC64OR || o5.Type != t { continue } _ = o5.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s6 := o5.Args[_i1] + o5_0 := o5.Args[0] + o5_1 := o5.Args[1] + for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 { + s6 := o5_0 if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 { continue } @@ -13450,13 +13529,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x6.Args[0] || mem != x6.Args[1] { continue } - o4 := o5.Args[1^_i1] + o4 := o5_1 if o4.Op != OpPPC64OR || o4.Type != t { continue } _ = o4.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s5 := o4.Args[_i2] + o4_0 := o4.Args[0] + o4_1 := o4.Args[1] + for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 { + s5 := o4_0 if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 { continue } @@ -13472,13 +13553,15 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x5.Args[0] || mem != x5.Args[1] { continue } - o3 := o4.Args[1^_i2] + o3 := o4_1 if o3.Op != OpPPC64OR || o3.Type != t { continue } _ = o3.Args[1] - for _i3 := 0; _i3 <= 1; _i3++ { - s4 := o3.Args[_i3] + o3_0 := o3.Args[0] + o3_1 := o3.Args[1] + for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = 
_i3+1, o3_1, o3_0 { + s4 := o3_0 if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 { continue } @@ -13494,7 +13577,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { if p != x4.Args[0] || mem != x4.Args[1] { continue } - s0 := o3.Args[1^_i3] + s0 := o3_1 if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 { continue } @@ -13531,12 +13614,12 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ORN_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ORN x (MOVDconst [-1])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != -1 { break } @@ -13548,11 +13631,11 @@ func rewriteValuePPC64_OpPPC64ORN_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORconst [c] (ORconst [d] x)) // result: (ORconst [c|d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64ORconst { break } @@ -13579,7 +13662,7 @@ func rewriteValuePPC64_OpPPC64ORconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -13588,12 +13671,12 @@ func rewriteValuePPC64_OpPPC64ORconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ROTL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ROTL x (MOVDconst [c])) // result: (ROTLconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -13606,12 +13689,12 @@ func rewriteValuePPC64_OpPPC64ROTL_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64ROTLW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ROTLW x (MOVDconst [c])) // result: (ROTLWconst x [c&31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -13624,13 +13707,13 @@ func rewriteValuePPC64_OpPPC64ROTLW_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64SUB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUB x (MOVDconst [c])) // cond: is32Bit(-c) // result: (ADDconst [-c] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -13646,21 +13729,20 @@ func rewriteValuePPC64_OpPPC64SUB_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (XOR (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (ROTLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLDconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRDconst { continue } @@ -13679,15 +13761,12 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { // cond: d == 32-c // result: (ROTLWconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLWconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRWconst { continue } @@ -13705,9 +13784,7 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { // match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) // result: (ROTL x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := 
v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLD { continue } @@ -13718,7 +13795,6 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRD { continue } @@ -13749,9 +13825,7 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { // match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) // result: (ROTLW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64SLW { continue } @@ -13762,7 +13836,6 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { continue } y := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64SRW { continue } @@ -13793,14 +13866,11 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpPPC64MOVDconst { continue } @@ -13815,10 +13885,8 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { // cond: isU32Bit(c) // result: (XORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpPPC64MOVDconst { continue } @@ -13836,11 +13904,11 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64XORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORconst [c] (XORconst [d] x)) // result: (XORconst [c^d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpPPC64XORconst { break } @@ -13857,7 +13925,7 @@ func rewriteValuePPC64_OpPPC64XORconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -13866,14 +13934,17 @@ func rewriteValuePPC64_OpPPC64XORconst_0(v *Value) bool { return false } func rewriteValuePPC64_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -13889,9 +13960,9 @@ func rewriteValuePPC64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -13907,9 +13978,9 @@ func rewriteValuePPC64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -13923,12 +13994,13 @@ func rewriteValuePPC64_OpPanicBounds_0(v *Value) bool { return false } func rewriteValuePPC64_OpPopCount16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount16 x) // result: (POPCNTW (MOVHZreg x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64POPCNTW) v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) @@ -13937,12 +14009,13 @@ func 
rewriteValuePPC64_OpPopCount16_0(v *Value) bool { } } func rewriteValuePPC64_OpPopCount32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount32 x) // result: (POPCNTW (MOVWZreg x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64POPCNTW) v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) v0.AddArg(x) @@ -13951,22 +14024,24 @@ func rewriteValuePPC64_OpPopCount32_0(v *Value) bool { } } func rewriteValuePPC64_OpPopCount64_0(v *Value) bool { + v_0 := v.Args[0] // match: (PopCount64 x) // result: (POPCNTD x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64POPCNTD) v.AddArg(x) return true } } func rewriteValuePPC64_OpPopCount8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount8 x) // result: (POPCNTB (MOVBZreg x)) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64POPCNTB) v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) @@ -13975,15 +14050,15 @@ func rewriteValuePPC64_OpPopCount8_0(v *Value) bool { } } func rewriteValuePPC64_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (MOVDconst [c])) // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14006,12 +14081,12 @@ func rewriteValuePPC64_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValuePPC64_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft32 x (MOVDconst [c])) // result: (ROTLWconst [c&31] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14024,8 +14099,8 @@ func rewriteValuePPC64_OpRotateLeft32_0(v *Value) bool { // match: (RotateLeft32 x y) // result: (ROTLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ROTLW) v.AddArg(x) v.AddArg(y) @@ -14033,12 +14108,12 @@ func rewriteValuePPC64_OpRotateLeft32_0(v *Value) bool { } } func rewriteValuePPC64_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft64 x (MOVDconst [c])) // result: (ROTLconst [c&63] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14051,8 +14126,8 @@ func rewriteValuePPC64_OpRotateLeft64_0(v *Value) bool { // match: (RotateLeft64 x y) // result: (ROTL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64ROTL) v.AddArg(x) v.AddArg(y) @@ -14060,15 +14135,15 @@ func rewriteValuePPC64_OpRotateLeft64_0(v *Value) bool { } } func rewriteValuePPC64_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVDconst [c])) // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14091,44 +14166,49 @@ func rewriteValuePPC64_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValuePPC64_OpRound_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round x) // result: (FROUND x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FROUND) v.AddArg(x) return true } } func rewriteValuePPC64_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: (LoweredRound32F x) for { - x := v.Args[0] + x := v_0 
v.reset(OpPPC64LoweredRound32F) v.AddArg(x) return true } } func rewriteValuePPC64_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: (LoweredRound64F x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64LoweredRound64F) v.AddArg(x) return true } } func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14142,8 +14222,8 @@ func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool { // match: (Rsh16Ux16 x y) // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -14163,15 +14243,15 @@ func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -14190,9 +14270,7 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool { // cond: uint32(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14211,8 +14289,8 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14226,8 +14304,8 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool { // match: (Rsh16Ux32 x y) // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -14247,15 +14325,15 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -14274,8 +14352,6 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -14291,9 +14367,7 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { // cond: uint64(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14312,8 +14386,8 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14327,8 +14401,8 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { // match: (Rsh16Ux64 x y) // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] y)))) for { - y := v.Args[1] - x 
:= v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -14346,14 +14420,16 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14367,8 +14443,8 @@ func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool { // match: (Rsh16Ux8 x y) // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -14388,14 +14464,16 @@ func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14409,8 +14487,8 @@ func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool { // match: (Rsh16x16 x y) // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -14430,15 +14508,15 @@ func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -14457,9 +14535,7 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool { // cond: uint32(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14478,8 +14554,8 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14493,8 +14569,8 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool { // match: (Rsh16x32 x y) // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -14514,15 +14590,15 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -14541,9 +14617,7 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (SRAWconst (SignExt16to32 x) [63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { 
break } @@ -14562,9 +14636,7 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { // cond: uint64(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14583,8 +14655,8 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14598,8 +14670,8 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { // match: (Rsh16x64 x y) // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -14617,14 +14689,16 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14638,8 +14712,8 @@ func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool { // match: (Rsh16x8 x y) // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -14659,14 +14733,16 @@ func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14678,8 +14754,8 @@ func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool { // match: (Rsh32Ux16 x y) // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -14697,15 +14773,15 @@ func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SRWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -14722,9 +14798,7 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool { // cond: uint32(c) < 32 // result: (SRWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14741,8 +14815,8 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14754,8 +14828,8 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool { // match: (Rsh32Ux32 x y) // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -14773,15 +14847,15 @@ func 
rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SRWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -14798,8 +14872,6 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -14815,9 +14887,7 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // cond: uint64(c) < 32 // result: (SRWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -14834,8 +14904,8 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14847,16 +14917,15 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) // result: (SRW x (ANDconst [31] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64AND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1.Args[_i0] - v_1_1 := v_1.Args[1^_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { continue } @@ -14873,9 +14942,7 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x (ANDconst [31] y)) // result: (SRW x (ANDconst [31] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 31 { break } @@ -14891,9 +14958,7 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -14923,9 +14988,7 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -14939,9 +15002,10 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { break } _ = v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1_1.Args[_i0] - v_1_1_1 := v_1_1.Args[1^_i0] + v_1_1_0 := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { + y := v_1_1_0 if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 { continue } @@ -14963,8 +15027,8 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x y) // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -14980,14 +15044,16 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // 
cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -14999,8 +15065,8 @@ func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool { // match: (Rsh32Ux8 x y) // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15018,14 +15084,16 @@ func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15037,8 +15105,8 @@ func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool { // match: (Rsh32x16 x y) // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15056,15 +15124,15 @@ func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SRAWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15081,9 +15149,7 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool { // cond: uint32(c) < 32 // result: (SRAWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -15100,8 +15166,8 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15113,8 +15179,8 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool { // match: (Rsh32x32 x y) // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15132,15 +15198,15 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SRAWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15157,9 +15223,7 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (SRAWconst x [63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15176,9 +15240,7 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // cond: uint64(c) < 32 // result: (SRAWconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -15195,8 +15257,8 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15208,16 +15270,15 @@ func 
rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x (AND y (MOVDconst [31]))) // result: (SRAW x (ANDconst [31] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64AND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1.Args[_i0] - v_1_1 := v_1.Args[1^_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 { continue } @@ -15234,9 +15295,7 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x (ANDconst [31] y)) // result: (SRAW x (ANDconst [31] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 31 { break } @@ -15252,9 +15311,7 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -15284,9 +15341,7 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -15300,9 +15355,10 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { break } _ = v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1_1.Args[_i0] - v_1_1_1 := v_1_1.Args[1^_i0] + v_1_1_0 := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { + y := v_1_1_0 if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 { continue } @@ -15324,8 +15380,8 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x y) // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15341,14 +15397,16 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15360,8 +15418,8 @@ func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool { // match: (Rsh32x8 x y) // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15379,14 +15437,16 @@ func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15398,8 +15458,8 @@ func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool { // match: (Rsh64Ux16 x y) // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := 
v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15417,15 +15477,15 @@ func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x (Const64 [c])) // cond: uint32(c) < 64 // result: (SRDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15442,9 +15502,7 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool { // cond: uint32(c) < 64 // result: (SRDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -15461,8 +15519,8 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15474,8 +15532,8 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool { // match: (Rsh64Ux32 x y) // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15493,15 +15551,15 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux64 x (Const64 [c])) // cond: uint64(c) < 64 // result: (SRDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15518,8 +15576,6 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -15535,9 +15591,7 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // cond: uint64(c) < 64 // result: (SRDconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -15554,8 +15608,8 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15567,16 +15621,15 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) // result: (SRD x (ANDconst [63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64AND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1.Args[_i0] - v_1_1 := v_1.Args[1^_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { continue } @@ -15593,9 +15646,7 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x (ANDconst [63] y)) // result: (SRD x (ANDconst [63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 63 { break } @@ -15611,9 +15662,7 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB 
|| v_1.Type != typ.UInt { break } @@ -15643,9 +15692,7 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -15659,9 +15706,10 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { break } _ = v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1_1.Args[_i0] - v_1_1_1 := v_1_1.Args[1^_i0] + v_1_1_0 := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { + y := v_1_1_0 if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 { continue } @@ -15683,8 +15731,8 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x y) // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15700,14 +15748,16 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15719,8 +15769,8 @@ func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool { // match: (Rsh64Ux8 x y) // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15738,14 +15788,16 @@ func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15757,8 +15809,8 @@ func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool { // match: (Rsh64x16 x y) // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15776,15 +15828,15 @@ func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 x (Const64 [c])) // cond: uint32(c) < 64 // result: (SRADconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15801,9 +15853,7 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool { // cond: uint32(c) < 64 // result: (SRADconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -15820,8 +15870,8 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15833,8 +15883,8 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool { // match: (Rsh64x32 x y) // result: (SRAD x (ORN y (MaskIfNotCarry 
(ADDconstForCarry [-64] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -15852,15 +15902,15 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x64 x (Const64 [c])) // cond: uint64(c) < 64 // result: (SRADconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15877,9 +15927,7 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (SRADconst x [63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -15896,9 +15944,7 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // cond: uint64(c) < 64 // result: (SRADconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -15915,8 +15961,8 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -15928,16 +15974,15 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x (AND y (MOVDconst [63]))) // result: (SRAD x (ANDconst [63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64AND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1.Args[_i0] - v_1_1 := v_1.Args[1^_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 { continue } @@ -15954,9 +15999,7 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x (ANDconst [63] y)) // result: (SRAD x (ANDconst [63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 63 { break } @@ -15972,9 +16015,7 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -16004,9 +16045,7 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { break } @@ -16020,9 +16059,10 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { break } _ = v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - y := v_1_1.Args[_i0] - v_1_1_1 := v_1_1.Args[1^_i0] + v_1_1_0 := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { + y := v_1_1_0 if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 { continue } @@ -16044,8 +16084,8 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x y) // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ 
-16061,14 +16101,16 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16080,8 +16122,8 @@ func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool { // match: (Rsh64x8 x y) // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64) @@ -16099,14 +16141,16 @@ func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16120,8 +16164,8 @@ func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool { // match: (Rsh8Ux16 x y) // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -16141,15 +16185,15 @@ func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16168,9 +16212,7 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool { // cond: uint32(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -16189,8 +16231,8 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16204,8 +16246,8 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool { // match: (Rsh8Ux32 x y) // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -16225,15 +16267,15 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16252,8 +16294,6 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -16269,9 +16309,7 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { // cond: uint64(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op 
!= OpPPC64MOVDconst { break } @@ -16290,8 +16328,8 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16305,8 +16343,8 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { // match: (Rsh8Ux64 x y) // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -16324,14 +16362,16 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16345,8 +16385,8 @@ func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool { // match: (Rsh8Ux8 x y) // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -16366,14 +16406,16 @@ func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16387,8 +16429,8 @@ func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool { // match: (Rsh8x16 x y) // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -16408,15 +16450,15 @@ func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16435,9 +16477,7 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool { // cond: uint32(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -16456,8 +16496,8 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16471,8 +16511,8 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool { // match: (Rsh8x32 x y) // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -16492,15 +16532,15 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: 
(Rsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16519,9 +16559,7 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (SRAWconst (SignExt8to32 x) [63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -16540,9 +16578,7 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { // cond: uint64(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpPPC64MOVDconst { break } @@ -16561,8 +16597,8 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16576,8 +16612,8 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { // match: (Rsh8x64 x y) // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -16595,14 +16631,16 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { } } func rewriteValuePPC64_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -16616,8 +16654,8 @@ func rewriteValuePPC64_OpRsh8x8_0(v *Value) bool { // match: (Rsh8x8 x y) // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y))))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -16637,72 +16675,79 @@ func rewriteValuePPC64_OpRsh8x8_0(v *Value) bool { } } func rewriteValuePPC64_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVHreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to64 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVHreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt32to64 x) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVWreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to64 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SRADconst (NEG x) [63]) for { t := v.Type - x := v.Args[0] + x := v_0 
v.reset(OpPPC64SRADconst) v.AuxInt = 63 v0 := b.NewValue0(v.Pos, OpPPC64NEG, t) @@ -16712,22 +16757,24 @@ func rewriteValuePPC64_OpSlicemask_0(v *Value) bool { } } func rewriteValuePPC64_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (FSQRT x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FSQRT) v.AddArg(x) return true } } func rewriteValuePPC64_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpPPC64CALLstatic) v.AuxInt = argwid v.Aux = target @@ -16736,14 +16783,17 @@ func rewriteValuePPC64_OpStaticCall_0(v *Value) bool { } } func rewriteValuePPC64_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) // result: (FMOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -16758,9 +16808,9 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { // result: (FMOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is32BitFloat(val.Type)) { break } @@ -16775,9 +16825,9 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { // result: (FMOVSstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -16792,9 +16842,9 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) { break } @@ -16809,9 +16859,9 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitInt(val.Type)) { break } @@ -16826,9 +16876,9 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -16843,9 +16893,9 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -16858,11 +16908,13 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool { return false } func rewriteValuePPC64_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v.AddArg(y) @@ -16870,11 +16922,13 @@ func rewriteValuePPC64_OpSub16_0(v *Value) bool { } } func rewriteValuePPC64_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v.AddArg(y) @@ -16882,11 +16936,13 @@ 
func rewriteValuePPC64_OpSub32_0(v *Value) bool { } } func rewriteValuePPC64_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (FSUBS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FSUBS) v.AddArg(x) v.AddArg(y) @@ -16894,11 +16950,13 @@ func rewriteValuePPC64_OpSub32F_0(v *Value) bool { } } func rewriteValuePPC64_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v.AddArg(y) @@ -16906,11 +16964,13 @@ func rewriteValuePPC64_OpSub64_0(v *Value) bool { } } func rewriteValuePPC64_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (FSUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64FSUB) v.AddArg(x) v.AddArg(y) @@ -16918,11 +16978,13 @@ func rewriteValuePPC64_OpSub64F_0(v *Value) bool { } } func rewriteValuePPC64_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v.AddArg(y) @@ -16930,11 +16992,13 @@ func rewriteValuePPC64_OpSub8_0(v *Value) bool { } } func rewriteValuePPC64_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64SUB) v.AddArg(x) v.AddArg(y) @@ -16942,22 +17006,24 @@ func rewriteValuePPC64_OpSubPtr_0(v *Value) bool { } } func rewriteValuePPC64_OpTrunc_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc x) // result: (FTRUNC x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64FTRUNC) v.AddArg(x) return true } } func rewriteValuePPC64_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // cond: isSigned(t) // result: (MOVBreg x) for { t := v.Type - x := v.Args[0] + x := v_0 if !(isSigned(t)) { break } @@ -16968,19 +17034,20 @@ func rewriteValuePPC64_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // cond: isSigned(t) // result: (MOVHreg x) for { t := v.Type - x := v.Args[0] + x := v_0 if !(isSigned(t)) { break } @@ -16991,19 +17058,20 @@ func rewriteValuePPC64_OpTrunc32to16_0(v *Value) bool { // match: (Trunc32to16 x) // result: (MOVHZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVHZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // cond: isSigned(t) // result: (MOVBreg x) for { t := v.Type - x := v.Args[0] + x := v_0 if !(isSigned(t)) { break } @@ -17014,19 +17082,20 @@ func rewriteValuePPC64_OpTrunc32to8_0(v *Value) bool { // match: (Trunc32to8 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 x) // cond: isSigned(t) // result: (MOVHreg x) for { t := v.Type - x := v.Args[0] + x := v_0 if !(isSigned(t)) { break } @@ -17037,19 +17106,20 @@ func rewriteValuePPC64_OpTrunc64to16_0(v *Value) bool { // match: (Trunc64to16 x) // result: (MOVHZreg x) for { - x := v.Args[0] + x := v_0 
v.reset(OpPPC64MOVHZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 x) // cond: isSigned(t) // result: (MOVWreg x) for { t := v.Type - x := v.Args[0] + x := v_0 if !(isSigned(t)) { break } @@ -17060,19 +17130,20 @@ func rewriteValuePPC64_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 x) // result: (MOVWZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVWZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 x) // cond: isSigned(t) // result: (MOVBreg x) for { t := v.Type - x := v.Args[0] + x := v_0 if !(isSigned(t)) { break } @@ -17083,20 +17154,23 @@ func rewriteValuePPC64_OpTrunc64to8_0(v *Value) bool { // match: (Trunc64to8 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpPPC64LoweredWB) v.Aux = fn v.AddArg(destptr) @@ -17106,11 +17180,13 @@ func rewriteValuePPC64_OpWB_0(v *Value) bool { } } func rewriteValuePPC64_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64XOR) v.AddArg(x) v.AddArg(y) @@ -17118,11 +17194,13 @@ func rewriteValuePPC64_OpXor16_0(v *Value) bool { } } func rewriteValuePPC64_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64XOR) v.AddArg(x) v.AddArg(y) @@ -17130,11 +17208,13 @@ func rewriteValuePPC64_OpXor32_0(v *Value) bool { } } func rewriteValuePPC64_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor64 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64XOR) v.AddArg(x) v.AddArg(y) @@ -17142,11 +17222,13 @@ func rewriteValuePPC64_OpXor64_0(v *Value) bool { } } func rewriteValuePPC64_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpPPC64XOR) v.AddArg(x) v.AddArg(y) @@ -17154,6 +17236,8 @@ func rewriteValuePPC64_OpXor8_0(v *Value) bool { } } func rewriteValuePPC64_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Zero [0] _ mem) // result: mem @@ -17161,7 +17245,7 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -17173,8 +17257,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AddArg(destptr) v.AddArg(mem) @@ -17186,8 +17270,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVHstorezero) v.AddArg(destptr) v.AddArg(mem) @@ -17199,8 +17283,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - 
destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 2 v.AddArg(destptr) @@ -17216,8 +17300,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVWstorezero) v.AddArg(destptr) v.AddArg(mem) @@ -17229,8 +17313,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 4 v.AddArg(destptr) @@ -17246,8 +17330,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVHstorezero) v.AuxInt = 4 v.AddArg(destptr) @@ -17263,8 +17347,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVBstorezero) v.AuxInt = 6 v.AddArg(destptr) @@ -17286,8 +17370,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -17302,8 +17386,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpPPC64MOVWstorezero) v.AuxInt = 4 v.AddArg(destptr) @@ -17317,6 +17401,8 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { return false } func rewriteValuePPC64_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Zero [12] {t} destptr mem) // cond: t.(*types.Type).Alignment()%4 == 0 @@ -17326,8 +17412,8 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -17349,8 +17435,8 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -17372,8 +17458,8 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -17399,8 +17485,8 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { break } t := v.Aux - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(t.(*types.Type).Alignment()%4 == 0) { break } @@ -17426,8 +17512,8 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { // result: (LoweredZero [s] ptr mem) for { s := v.AuxInt - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpPPC64LoweredZero) v.AuxInt = s v.AddArg(ptr) @@ -17436,60 +17522,66 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { } } func rewriteValuePPC64_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVHZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVHZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to64 x) // result: (MOVHZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVHZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt32to64 x) // result: (MOVWZreg x) for { - x := v.Args[0] + x := v_0 
v.reset(OpPPC64MOVWZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBZreg) v.AddArg(x) return true } } func rewriteValuePPC64_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to64 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpPPC64MOVBZreg) v.AddArg(x) return true @@ -17620,9 +17712,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17648,9 +17742,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17676,9 +17772,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17773,9 +17871,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17801,9 +17901,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17829,9 +17931,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17927,9 +18031,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17955,9 +18061,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -17983,9 +18091,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18182,9 +18292,11 @@ func rewriteBlockPPC64(b 
*Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18210,9 +18322,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18238,9 +18352,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18336,9 +18452,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18364,9 +18482,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18392,9 +18512,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18689,9 +18811,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18717,9 +18841,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } @@ -18745,9 +18871,11 @@ func rewriteBlockPPC64(b *Block) bool { break } _ = z.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := z.Args[_i0] - y := z.Args[1^_i0] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 if !(z.Uses == 1) { continue } diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 2c23609d48..5e5c472aab 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -512,11 +512,13 @@ func rewriteValueRISCV64(v *Value) bool { return false } func rewriteValueRISCV64_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add16 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64ADD) v.AddArg(x) v.AddArg(y) @@ -524,11 +526,13 @@ func rewriteValueRISCV64_OpAdd16_0(v *Value) bool { } } func rewriteValueRISCV64_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32 x y) // result: (ADD x y) for { - y := 
v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64ADD) v.AddArg(x) v.AddArg(y) @@ -536,11 +540,13 @@ func rewriteValueRISCV64_OpAdd32_0(v *Value) bool { } } func rewriteValueRISCV64_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F x y) // result: (FADDS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FADDS) v.AddArg(x) v.AddArg(y) @@ -548,11 +554,13 @@ func rewriteValueRISCV64_OpAdd32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpAdd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64ADD) v.AddArg(x) v.AddArg(y) @@ -560,11 +568,13 @@ func rewriteValueRISCV64_OpAdd64_0(v *Value) bool { } } func rewriteValueRISCV64_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F x y) // result: (FADDD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FADDD) v.AddArg(x) v.AddArg(y) @@ -572,11 +582,13 @@ func rewriteValueRISCV64_OpAdd64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add8 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64ADD) v.AddArg(x) v.AddArg(y) @@ -584,11 +596,13 @@ func rewriteValueRISCV64_OpAdd8_0(v *Value) bool { } } func rewriteValueRISCV64_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64ADD) v.AddArg(x) v.AddArg(y) @@ -596,11 +610,12 @@ func rewriteValueRISCV64_OpAddPtr_0(v *Value) bool { } } func rewriteValueRISCV64_OpAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (Addr {sym} base) // result: (MOVaddr {sym} base) for { sym := v.Aux - base := v.Args[0] + base := v_0 v.reset(OpRISCV64MOVaddr) v.Aux = sym v.AddArg(base) @@ -608,11 +623,13 @@ func rewriteValueRISCV64_OpAddr_0(v *Value) bool { } } func rewriteValueRISCV64_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And16 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v.AddArg(x) v.AddArg(y) @@ -620,11 +637,13 @@ func rewriteValueRISCV64_OpAnd16_0(v *Value) bool { } } func rewriteValueRISCV64_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And32 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v.AddArg(x) v.AddArg(y) @@ -632,11 +651,13 @@ func rewriteValueRISCV64_OpAnd32_0(v *Value) bool { } } func rewriteValueRISCV64_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And64 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v.AddArg(x) v.AddArg(y) @@ -644,11 +665,13 @@ func rewriteValueRISCV64_OpAnd64_0(v *Value) bool { } } func rewriteValueRISCV64_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v.AddArg(x) v.AddArg(y) @@ -656,11 +679,13 @@ func rewriteValueRISCV64_OpAnd8_0(v *Value) bool { } } func rewriteValueRISCV64_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y 
:= v_1 v.reset(OpRISCV64AND) v.AddArg(x) v.AddArg(y) @@ -668,13 +693,15 @@ func rewriteValueRISCV64_OpAndB_0(v *Value) bool { } } func rewriteValueRISCV64_OpAvg64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Avg64u x y) // result: (ADD (ADD (SRLI [1] x) (SRLI [1] y)) (ANDI [1] (AND x y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64ADD) v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t) v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) @@ -697,13 +724,16 @@ func rewriteValueRISCV64_OpAvg64u_0(v *Value) bool { } } func rewriteValueRISCV64_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpRISCV64CALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -713,10 +743,11 @@ func rewriteValueRISCV64_OpClosureCall_0(v *Value) bool { } } func rewriteValueRISCV64_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (XORI [int64(-1)] x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64XORI) v.AuxInt = int64(-1) v.AddArg(x) @@ -724,10 +755,11 @@ func rewriteValueRISCV64_OpCom16_0(v *Value) bool { } } func rewriteValueRISCV64_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (XORI [int64(-1)] x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64XORI) v.AuxInt = int64(-1) v.AddArg(x) @@ -735,10 +767,11 @@ func rewriteValueRISCV64_OpCom32_0(v *Value) bool { } } func rewriteValueRISCV64_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com64 x) // result: (XORI [int64(-1)] x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64XORI) v.AuxInt = int64(-1) v.AddArg(x) @@ -746,10 +779,11 @@ func rewriteValueRISCV64_OpCom64_0(v *Value) bool { } } func rewriteValueRISCV64_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (XORI [int64(-1)] x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64XORI) v.AuxInt = int64(-1) v.AddArg(x) @@ -844,11 +878,13 @@ func rewriteValueRISCV64_OpConstNil_0(v *Value) bool { } } func rewriteValueRISCV64_OpConvert_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Convert x mem) // result: (MOVconvert x mem) for { - mem := v.Args[1] - x := v.Args[0] + x := v_0 + mem := v_1 v.reset(OpRISCV64MOVconvert) v.AddArg(x) v.AddArg(mem) @@ -856,113 +892,125 @@ func rewriteValueRISCV64_OpConvert_0(v *Value) bool { } } func rewriteValueRISCV64_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (FCVTWS x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTWS) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt32Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64 x) // result: (FCVTLS x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTLS) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (FCVTDS x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTDS) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (FCVTSW x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTSW) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // 
result: (FCVTDW x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTDW) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (FCVTWD x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTWD) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (FCVTSD x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTSD) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt64Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64 x) // result: (FCVTLD x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTLD) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt64to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to32F x) // result: (FCVTSL x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTSL) v.AddArg(x) return true } } func rewriteValueRISCV64_OpCvt64to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to64F x) // result: (FCVTDL x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FCVTDL) v.AddArg(x) return true } } func rewriteValueRISCV64_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -974,13 +1022,15 @@ func rewriteValueRISCV64_OpDiv16_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVUW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -992,11 +1042,13 @@ func rewriteValueRISCV64_OpDiv16u_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32 x y) // result: (DIVW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVW) v.AddArg(x) v.AddArg(y) @@ -1004,11 +1056,13 @@ func rewriteValueRISCV64_OpDiv32_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (FDIVS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FDIVS) v.AddArg(x) v.AddArg(y) @@ -1016,11 +1070,13 @@ func rewriteValueRISCV64_OpDiv32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32u x y) // result: (DIVUW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVUW) v.AddArg(x) v.AddArg(y) @@ -1028,11 +1084,13 @@ func rewriteValueRISCV64_OpDiv32u_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64 x y) // result: (DIV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIV) v.AddArg(x) v.AddArg(y) @@ -1040,11 +1098,13 @@ func rewriteValueRISCV64_OpDiv64_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (FDIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 
v.reset(OpRISCV64FDIVD) v.AddArg(x) v.AddArg(y) @@ -1052,11 +1112,13 @@ func rewriteValueRISCV64_OpDiv64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64u x y) // result: (DIVU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVU) v.AddArg(x) v.AddArg(y) @@ -1064,13 +1126,15 @@ func rewriteValueRISCV64_OpDiv64u_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -1082,13 +1146,15 @@ func rewriteValueRISCV64_OpDiv8_0(v *Value) bool { } } func rewriteValueRISCV64_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64DIVUW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -1100,13 +1166,15 @@ func rewriteValueRISCV64_OpDiv8u_0(v *Value) bool { } } func rewriteValueRISCV64_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (SEQZ (ZeroExt16to64 (SUB x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) @@ -1118,13 +1186,15 @@ func rewriteValueRISCV64_OpEq16_0(v *Value) bool { } } func rewriteValueRISCV64_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x y) // result: (SEQZ (ZeroExt32to64 (SUB x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) @@ -1136,11 +1206,13 @@ func rewriteValueRISCV64_OpEq32_0(v *Value) bool { } } func rewriteValueRISCV64_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq32F x y) // result: (FEQS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FEQS) v.AddArg(x) v.AddArg(y) @@ -1148,12 +1220,14 @@ func rewriteValueRISCV64_OpEq32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64 x y) // result: (SEQZ (SUB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) v0.AddArg(x) @@ -1163,11 +1237,13 @@ func rewriteValueRISCV64_OpEq64_0(v *Value) bool { } } func rewriteValueRISCV64_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq64F x y) // result: (FEQD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FEQD) v.AddArg(x) v.AddArg(y) @@ -1175,13 +1251,15 @@ func rewriteValueRISCV64_OpEq64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (SEQZ (ZeroExt8to64 (SUB x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SEQZ) v0 := 
b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) @@ -1193,13 +1271,15 @@ func rewriteValueRISCV64_OpEq8_0(v *Value) bool { } } func rewriteValueRISCV64_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (XORI [1] (XOR x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64XORI) v.AuxInt = 1 v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool) @@ -1210,12 +1290,14 @@ func rewriteValueRISCV64_OpEqB_0(v *Value) bool { } } func rewriteValueRISCV64_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (EqPtr x y) // result: (SEQZ (SUB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) v0.AddArg(x) @@ -1225,13 +1307,15 @@ func rewriteValueRISCV64_OpEqPtr_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (Not (Less16 x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) v0.AddArg(x) @@ -1241,13 +1325,15 @@ func rewriteValueRISCV64_OpGeq16_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (Not (Less16U x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool) v0.AddArg(x) @@ -1257,13 +1343,15 @@ func rewriteValueRISCV64_OpGeq16U_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32 x y) // result: (Not (Less32 x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v0.AddArg(x) @@ -1273,11 +1361,13 @@ func rewriteValueRISCV64_OpGeq32_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq32F x y) // result: (FLES y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLES) v.AddArg(y) v.AddArg(x) @@ -1285,13 +1375,15 @@ func rewriteValueRISCV64_OpGeq32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32U x y) // result: (Not (Less32U x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v0.AddArg(x) @@ -1301,13 +1393,15 @@ func rewriteValueRISCV64_OpGeq32U_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64 x y) // result: (Not (Less64 x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) v0.AddArg(x) @@ -1317,11 +1411,13 @@ func rewriteValueRISCV64_OpGeq64_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64F x y) // result: (FLED y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLED) v.AddArg(y) v.AddArg(x) @@ -1329,13 +1425,15 @@ 
func rewriteValueRISCV64_OpGeq64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64U x y) // result: (Not (Less64U x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) v0.AddArg(x) @@ -1345,13 +1443,15 @@ func rewriteValueRISCV64_OpGeq64U_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (Not (Less8 x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) v0.AddArg(x) @@ -1361,13 +1461,15 @@ func rewriteValueRISCV64_OpGeq8_0(v *Value) bool { } } func rewriteValueRISCV64_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (Not (Less8U x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) v0.AddArg(x) @@ -1401,11 +1503,13 @@ func rewriteValueRISCV64_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater16 x y) // result: (Less16 y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess16) v.AddArg(y) v.AddArg(x) @@ -1413,11 +1517,13 @@ func rewriteValueRISCV64_OpGreater16_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater16U x y) // result: (Less16U y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess16U) v.AddArg(y) v.AddArg(x) @@ -1425,11 +1531,13 @@ func rewriteValueRISCV64_OpGreater16U_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32 x y) // result: (Less32 y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess32) v.AddArg(y) v.AddArg(x) @@ -1437,11 +1545,13 @@ func rewriteValueRISCV64_OpGreater32_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32F x y) // result: (FLTS y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLTS) v.AddArg(y) v.AddArg(x) @@ -1449,11 +1559,13 @@ func rewriteValueRISCV64_OpGreater32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32U x y) // result: (Less32U y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess32U) v.AddArg(y) v.AddArg(x) @@ -1461,11 +1573,13 @@ func rewriteValueRISCV64_OpGreater32U_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64 x y) // result: (Less64 y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess64) v.AddArg(y) v.AddArg(x) @@ -1473,11 +1587,13 @@ func rewriteValueRISCV64_OpGreater64_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64F x y) // result: (FLTD y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLTD) v.AddArg(y) v.AddArg(x) @@ -1485,11 +1601,13 @@ func 
rewriteValueRISCV64_OpGreater64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64U x y) // result: (Less64U y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess64U) v.AddArg(y) v.AddArg(x) @@ -1497,11 +1615,13 @@ func rewriteValueRISCV64_OpGreater64U_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater8 x y) // result: (Less8 y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess8) v.AddArg(y) v.AddArg(x) @@ -1509,11 +1629,13 @@ func rewriteValueRISCV64_OpGreater8_0(v *Value) bool { } } func rewriteValueRISCV64_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater8U x y) // result: (Less8U y x) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLess8U) v.AddArg(y) v.AddArg(x) @@ -1521,13 +1643,15 @@ func rewriteValueRISCV64_OpGreater8U_0(v *Value) bool { } } func rewriteValueRISCV64_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32 x y) // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRAI) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) @@ -1542,13 +1666,15 @@ func rewriteValueRISCV64_OpHmul32_0(v *Value) bool { } } func rewriteValueRISCV64_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32u x y) // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRLI) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) @@ -1563,11 +1689,13 @@ func rewriteValueRISCV64_OpHmul32u_0(v *Value) bool { } } func rewriteValueRISCV64_OpHmul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64 x y) // result: (MULH x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64MULH) v.AddArg(x) v.AddArg(y) @@ -1575,11 +1703,13 @@ func rewriteValueRISCV64_OpHmul64_0(v *Value) bool { } } func rewriteValueRISCV64_OpHmul64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64u x y) // result: (MULHU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64MULHU) v.AddArg(x) v.AddArg(y) @@ -1587,12 +1717,14 @@ func rewriteValueRISCV64_OpHmul64u_0(v *Value) bool { } } func rewriteValueRISCV64_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpRISCV64CALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -1601,11 +1733,13 @@ func rewriteValueRISCV64_OpInterCall_0(v *Value) bool { } } func rewriteValueRISCV64_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsInBounds idx len) // result: (Less64U idx len) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpLess64U) v.AddArg(idx) v.AddArg(len) @@ -1613,12 +1747,13 @@ func rewriteValueRISCV64_OpIsInBounds_0(v *Value) bool { } } func rewriteValueRISCV64_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsNonNil p) // result: (NeqPtr (MOVDconst) 
p) for { - p := v.Args[0] + p := v_0 v.reset(OpNeqPtr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v.AddArg(v0) @@ -1627,11 +1762,13 @@ func rewriteValueRISCV64_OpIsNonNil_0(v *Value) bool { } } func rewriteValueRISCV64_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsSliceInBounds idx len) // result: (Leq64U idx len) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpLeq64U) v.AddArg(idx) v.AddArg(len) @@ -1639,13 +1776,15 @@ func rewriteValueRISCV64_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (Not (Less16 y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) v0.AddArg(y) @@ -1655,13 +1794,15 @@ func rewriteValueRISCV64_OpLeq16_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (Not (Less16U y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool) v0.AddArg(y) @@ -1671,13 +1812,15 @@ func rewriteValueRISCV64_OpLeq16U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32 x y) // result: (Not (Less32 y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v0.AddArg(y) @@ -1687,11 +1830,13 @@ func rewriteValueRISCV64_OpLeq32_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Leq32F x y) // result: (FLES x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLES) v.AddArg(x) v.AddArg(y) @@ -1699,13 +1844,15 @@ func rewriteValueRISCV64_OpLeq32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32U x y) // result: (Not (Less32U y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v0.AddArg(y) @@ -1715,13 +1862,15 @@ func rewriteValueRISCV64_OpLeq32U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64 x y) // result: (Not (Less64 y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) v0.AddArg(y) @@ -1731,11 +1880,13 @@ func rewriteValueRISCV64_OpLeq64_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Leq64F x y) // result: (FLED x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLED) v.AddArg(x) v.AddArg(y) @@ -1743,13 +1894,15 @@ func rewriteValueRISCV64_OpLeq64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64U x y) // result: (Not (Less64U y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) v0.AddArg(y) @@ 
-1759,13 +1912,15 @@ func rewriteValueRISCV64_OpLeq64U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (Not (Less8 y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) v0.AddArg(y) @@ -1775,13 +1930,15 @@ func rewriteValueRISCV64_OpLeq8_0(v *Value) bool { } } func rewriteValueRISCV64_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (Not (Less8U y x)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) v0.AddArg(y) @@ -1791,13 +1948,15 @@ func rewriteValueRISCV64_OpLeq8U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (SLT (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -1809,13 +1968,15 @@ func rewriteValueRISCV64_OpLess16_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1827,13 +1988,15 @@ func rewriteValueRISCV64_OpLess16U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32 x y) // result: (SLT (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1845,11 +2008,13 @@ func rewriteValueRISCV64_OpLess32_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less32F x y) // result: (FLTS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLTS) v.AddArg(x) v.AddArg(y) @@ -1857,13 +2022,15 @@ func rewriteValueRISCV64_OpLess32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32U x y) // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1875,11 +2042,13 @@ func rewriteValueRISCV64_OpLess32U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64 x y) // result: (SLT x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLT) v.AddArg(x) v.AddArg(y) @@ -1887,11 +2056,13 @@ func rewriteValueRISCV64_OpLess64_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64F x y) // result: (FLTD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FLTD) v.AddArg(x) v.AddArg(y) @@ -1899,11 
+2070,13 @@ func rewriteValueRISCV64_OpLess64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64U x y) // result: (SLTU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLTU) v.AddArg(x) v.AddArg(y) @@ -1911,13 +2084,15 @@ func rewriteValueRISCV64_OpLess64U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (SLT (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLT) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -1929,13 +2104,15 @@ func rewriteValueRISCV64_OpLess8_0(v *Value) bool { } } func rewriteValueRISCV64_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SLTU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -1947,13 +2124,15 @@ func rewriteValueRISCV64_OpLess8U_0(v *Value) bool { } } func rewriteValueRISCV64_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: t.IsBoolean() // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean()) { break } @@ -1967,8 +2146,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -1982,8 +2161,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVBUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && !isSigned(t)) { break } @@ -1997,8 +2176,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -2012,8 +2191,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVHUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -2027,8 +2206,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && isSigned(t)) { break } @@ -2042,8 +2221,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVWUload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && !isSigned(t)) { break } @@ -2057,8 +2236,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) || isPtr(t)) { break } @@ -2072,8 +2251,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (FMOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -2087,8 +2266,8 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { // result: (FMOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + 
mem := v_1 if !(is64BitFloat(t)) { break } @@ -2100,12 +2279,12 @@ func rewriteValueRISCV64_OpLoad_0(v *Value) bool { return false } func rewriteValueRISCV64_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpRISCV64MOVaddr) v.Aux = sym v.AddArg(base) @@ -2113,14 +2292,16 @@ func rewriteValueRISCV64_OpLocalAddr_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2138,14 +2319,16 @@ func rewriteValueRISCV64_OpLsh16x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2163,13 +2346,15 @@ func rewriteValueRISCV64_OpLsh16x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x64 x y) // result: (AND (SLL x y) (Neg16 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2185,14 +2370,16 @@ func rewriteValueRISCV64_OpLsh16x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2210,14 +2397,16 @@ func rewriteValueRISCV64_OpLsh16x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2235,14 +2424,16 @@ func rewriteValueRISCV64_OpLsh32x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2260,13 +2451,15 @@ func rewriteValueRISCV64_OpLsh32x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x64 x y) // result: (AND (SLL x y) (Neg32 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2282,14 +2475,16 @@ func 
rewriteValueRISCV64_OpLsh32x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2307,14 +2502,16 @@ func rewriteValueRISCV64_OpLsh32x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2332,14 +2529,16 @@ func rewriteValueRISCV64_OpLsh64x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2357,13 +2556,15 @@ func rewriteValueRISCV64_OpLsh64x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x64 x y) // result: (AND (SLL x y) (Neg64 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2379,14 +2580,16 @@ func rewriteValueRISCV64_OpLsh64x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2404,14 +2607,16 @@ func rewriteValueRISCV64_OpLsh64x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2429,14 +2634,16 @@ func rewriteValueRISCV64_OpLsh8x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2454,13 +2661,15 @@ func rewriteValueRISCV64_OpLsh8x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x64 x y) // result: (AND (SLL x y) (Neg8 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2476,14 
+2685,16 @@ func rewriteValueRISCV64_OpLsh8x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg(x) @@ -2501,13 +2712,15 @@ func rewriteValueRISCV64_OpLsh8x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (REMW (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -2519,13 +2732,15 @@ func rewriteValueRISCV64_OpMod16_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMUW) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -2537,11 +2752,13 @@ func rewriteValueRISCV64_OpMod16u_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod32 x y) // result: (REMW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMW) v.AddArg(x) v.AddArg(y) @@ -2549,11 +2766,13 @@ func rewriteValueRISCV64_OpMod32_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod32u x y) // result: (REMUW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMUW) v.AddArg(x) v.AddArg(y) @@ -2561,11 +2780,13 @@ func rewriteValueRISCV64_OpMod32u_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64 x y) // result: (REM x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REM) v.AddArg(x) v.AddArg(y) @@ -2573,11 +2794,13 @@ func rewriteValueRISCV64_OpMod64_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64u x y) // result: (REMU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMU) v.AddArg(x) v.AddArg(y) @@ -2585,13 +2808,15 @@ func rewriteValueRISCV64_OpMod64u_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (REMW (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -2603,13 +2828,15 @@ func rewriteValueRISCV64_OpMod8_0(v *Value) bool { } } func rewriteValueRISCV64_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64REMUW) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) @@ -2621,6 +2848,9 @@ func rewriteValueRISCV64_OpMod8u_0(v 
*Value) bool { } } func rewriteValueRISCV64_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -2630,7 +2860,7 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -2642,9 +2872,9 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpRISCV64MOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) @@ -2660,9 +2890,9 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpRISCV64MOVHstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) @@ -2678,9 +2908,9 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpRISCV64MOVWstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) @@ -2696,9 +2926,9 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpRISCV64MOVDstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) @@ -2713,9 +2943,9 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpRISCV64LoweredMove) v.AuxInt = t.(*types.Type).Alignment() v.AddArg(dst) @@ -2729,13 +2959,15 @@ func rewriteValueRISCV64_OpMove_0(v *Value) bool { } } func rewriteValueRISCV64_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul16 x y) // result: (MULW (SignExt16to32 x) (SignExt16to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64MULW) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -2747,11 +2979,13 @@ func rewriteValueRISCV64_OpMul16_0(v *Value) bool { } } func rewriteValueRISCV64_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MULW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64MULW) v.AddArg(x) v.AddArg(y) @@ -2759,11 +2993,13 @@ func rewriteValueRISCV64_OpMul32_0(v *Value) bool { } } func rewriteValueRISCV64_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (FMULS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FMULS) v.AddArg(x) v.AddArg(y) @@ -2771,11 +3007,13 @@ func rewriteValueRISCV64_OpMul32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64 x y) // result: (MUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64MUL) v.AddArg(x) v.AddArg(y) @@ -2783,11 +3021,13 @@ func rewriteValueRISCV64_OpMul64_0(v *Value) bool { } } func rewriteValueRISCV64_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (FMULD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FMULD) v.AddArg(x) 
v.AddArg(y) @@ -2795,13 +3035,15 @@ func rewriteValueRISCV64_OpMul64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul8 x y) // result: (MULW (SignExt8to32 x) (SignExt8to32 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64MULW) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -2813,12 +3055,13 @@ func rewriteValueRISCV64_OpMul8_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg16 x) // result: (SUB (MOVHconst) x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) v.AddArg(v0) @@ -2827,12 +3070,13 @@ func rewriteValueRISCV64_OpNeg16_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg32 x) // result: (SUB (MOVWconst) x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) v.AddArg(v0) @@ -2841,22 +3085,24 @@ func rewriteValueRISCV64_OpNeg32_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (FNEGS x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FNEGS) v.AddArg(x) return true } } func rewriteValueRISCV64_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg64 x) // result: (SUB (MOVDconst) x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v.AddArg(v0) @@ -2865,22 +3111,24 @@ func rewriteValueRISCV64_OpNeg64_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (FNEGD x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FNEGD) v.AddArg(x) return true } } func rewriteValueRISCV64_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg8 x) // result: (SUB (MOVBconst) x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SUB) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) v.AddArg(v0) @@ -2889,13 +3137,15 @@ func rewriteValueRISCV64_OpNeg8_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (SNEZ (ZeroExt16to64 (SUB x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) @@ -2907,13 +3157,15 @@ func rewriteValueRISCV64_OpNeq16_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32 x y) // result: (SNEZ (ZeroExt32to64 (SUB x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) @@ -2925,11 +3177,13 @@ func rewriteValueRISCV64_OpNeq32_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq32F x y) // result: (FNES x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FNES) 
v.AddArg(x) v.AddArg(y) @@ -2937,12 +3191,14 @@ func rewriteValueRISCV64_OpNeq32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Neq64 x y) // result: (SNEZ (SUB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) v0.AddArg(x) @@ -2952,11 +3208,13 @@ func rewriteValueRISCV64_OpNeq64_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq64F x y) // result: (FNED x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FNED) v.AddArg(x) v.AddArg(y) @@ -2964,13 +3222,15 @@ func rewriteValueRISCV64_OpNeq64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (SNEZ (ZeroExt8to64 (SUB x y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) @@ -2982,11 +3242,13 @@ func rewriteValueRISCV64_OpNeq8_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64XOR) v.AddArg(x) v.AddArg(y) @@ -2994,12 +3256,14 @@ func rewriteValueRISCV64_OpNeqB_0(v *Value) bool { } } func rewriteValueRISCV64_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (NeqPtr x y) // result: (SNEZ (SUB x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) v0.AddArg(x) @@ -3009,11 +3273,13 @@ func rewriteValueRISCV64_OpNeqPtr_0(v *Value) bool { } } func rewriteValueRISCV64_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpRISCV64LoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -3021,10 +3287,11 @@ func rewriteValueRISCV64_OpNilCheck_0(v *Value) bool { } } func rewriteValueRISCV64_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORI [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64XORI) v.AuxInt = 1 v.AddArg(x) @@ -3032,13 +3299,14 @@ func rewriteValueRISCV64_OpNot_0(v *Value) bool { } } func rewriteValueRISCV64_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OffPtr [off] ptr:(SP)) // result: (MOVaddr [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if ptr.Op != OpSP { break } @@ -3052,7 +3320,7 @@ func rewriteValueRISCV64_OpOffPtr_0(v *Value) bool { // result: (ADDI [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if !(is32Bit(off)) { break } @@ -3065,7 +3333,7 @@ func rewriteValueRISCV64_OpOffPtr_0(v *Value) bool { // result: (ADD (MOVDconst [off]) ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpRISCV64ADD) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v0.AuxInt = off @@ -3075,11 +3343,13 @@ func rewriteValueRISCV64_OpOffPtr_0(v *Value) bool { } } func rewriteValueRISCV64_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (OR x y) for { - y := v.Args[1] 
- x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64OR) v.AddArg(x) v.AddArg(y) @@ -3087,11 +3357,13 @@ func rewriteValueRISCV64_OpOr16_0(v *Value) bool { } } func rewriteValueRISCV64_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64OR) v.AddArg(x) v.AddArg(y) @@ -3099,11 +3371,13 @@ func rewriteValueRISCV64_OpOr32_0(v *Value) bool { } } func rewriteValueRISCV64_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or64 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64OR) v.AddArg(x) v.AddArg(y) @@ -3111,11 +3385,13 @@ func rewriteValueRISCV64_OpOr64_0(v *Value) bool { } } func rewriteValueRISCV64_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64OR) v.AddArg(x) v.AddArg(y) @@ -3123,11 +3399,13 @@ func rewriteValueRISCV64_OpOr8_0(v *Value) bool { } } func rewriteValueRISCV64_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64OR) v.AddArg(x) v.AddArg(y) @@ -3135,14 +3413,17 @@ func rewriteValueRISCV64_OpOrB_0(v *Value) bool { } } func rewriteValueRISCV64_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -3158,9 +3439,9 @@ func rewriteValueRISCV64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -3176,9 +3457,9 @@ func rewriteValueRISCV64_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -3192,18 +3473,18 @@ func rewriteValueRISCV64_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64ADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADD (MOVDconst [off]) ptr) // cond: is32Bit(off) // result: (ADDI [off] ptr) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpRISCV64MOVDconst { continue } off := v_0.AuxInt - ptr := v.Args[1^_i0] + ptr := v_1 if !(is32Bit(off)) { continue } @@ -3217,12 +3498,12 @@ func rewriteValueRISCV64_OpRISCV64ADD_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64ADDI_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDI [c] (MOVaddr [d] {s} x)) // cond: is32Bit(c+d) // result: (MOVaddr [c+d] {s} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } @@ -3244,7 +3525,7 @@ func rewriteValueRISCV64_OpRISCV64ADDI_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -3253,20 +3534,21 @@ func rewriteValueRISCV64_OpRISCV64ADDI_0(v *Value) bool { return false } func 
rewriteValueRISCV64_OpRISCV64MOVBUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3283,13 +3565,12 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3303,20 +3584,21 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3333,13 +3615,12 @@ func rewriteValueRISCV64_OpRISCV64MOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3353,21 +3634,23 @@ func rewriteValueRISCV64_OpRISCV64MOVBload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3385,14 +3668,13 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3454,20 +3736,21 @@ func rewriteValueRISCV64_OpRISCV64MOVDconst_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3484,13 +3767,12 @@ func rewriteValueRISCV64_OpRISCV64MOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if 
v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3504,21 +3786,23 @@ func rewriteValueRISCV64_OpRISCV64MOVDload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3536,14 +3820,13 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3558,20 +3841,21 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVHUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3588,13 +3872,12 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3608,20 +3891,21 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3638,13 +3922,12 @@ func rewriteValueRISCV64_OpRISCV64MOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3658,21 +3941,23 @@ func rewriteValueRISCV64_OpRISCV64MOVHload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux 
base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3690,14 +3975,13 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3712,20 +3996,21 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVWUload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3742,13 +4027,12 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3762,20 +4046,21 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3792,13 +4077,12 @@ func rewriteValueRISCV64_OpRISCV64MOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1 + off2)) { break } @@ -3812,21 +4096,23 @@ func rewriteValueRISCV64_OpRISCV64MOVWload_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRISCV64MOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64MOVaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -3844,14 +4130,13 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpRISCV64ADDI { break } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1 + off2)) { break } @@ -3866,15 +4151,15 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // 
match: (RotateLeft16 x (MOVHconst [c])) // result: (Or16 (Lsh16x64 x (MOVHconst [c&15])) (Rsh16Ux64 x (MOVHconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpRISCV64MOVHconst { break } @@ -3897,15 +4182,15 @@ func rewriteValueRISCV64_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft32 x (MOVWconst [c])) // result: (Or32 (Lsh32x64 x (MOVWconst [c&31])) (Rsh32Ux64 x (MOVWconst [-c&31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpRISCV64MOVWconst { break } @@ -3928,15 +4213,15 @@ func rewriteValueRISCV64_OpRotateLeft32_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft64 x (MOVDconst [c])) // result: (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpRISCV64MOVDconst { break } @@ -3959,15 +4244,15 @@ func rewriteValueRISCV64_OpRotateLeft64_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVBconst [c])) // result: (Or8 (Lsh8x64 x (MOVBconst [c&7])) (Rsh8Ux64 x (MOVBconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpRISCV64MOVBconst { break } @@ -3990,10 +4275,11 @@ func rewriteValueRISCV64_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueRISCV64_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4001,10 +4287,11 @@ func rewriteValueRISCV64_OpRound32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4012,14 +4299,16 @@ func rewriteValueRISCV64_OpRound64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -4039,14 +4328,16 @@ func rewriteValueRISCV64_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -4066,14 +4357,16 @@ func rewriteValueRISCV64_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) // result: (AND 
(SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -4091,14 +4384,16 @@ func rewriteValueRISCV64_OpRsh16Ux64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -4118,14 +4413,16 @@ func rewriteValueRISCV64_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -4147,14 +4444,16 @@ func rewriteValueRISCV64_OpRsh16x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -4176,14 +4475,16 @@ func rewriteValueRISCV64_OpRsh16x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -4203,14 +4504,16 @@ func rewriteValueRISCV64_OpRsh16x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -4232,14 +4535,16 @@ func rewriteValueRISCV64_OpRsh16x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -4259,14 +4564,16 @@ func rewriteValueRISCV64_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y 
:= v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -4286,14 +4593,16 @@ func rewriteValueRISCV64_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -4311,14 +4620,16 @@ func rewriteValueRISCV64_OpRsh32Ux64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -4338,14 +4649,16 @@ func rewriteValueRISCV64_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -4367,14 +4680,16 @@ func rewriteValueRISCV64_OpRsh32x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -4396,14 +4711,16 @@ func rewriteValueRISCV64_OpRsh32x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x y) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -4423,14 +4740,16 @@ func rewriteValueRISCV64_OpRsh32x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -4452,14 +4771,16 @@ func rewriteValueRISCV64_OpRsh32x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, 
OpRISCV64SRL, t) v0.AddArg(x) @@ -4477,14 +4798,16 @@ func rewriteValueRISCV64_OpRsh64Ux16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg(x) @@ -4502,13 +4825,15 @@ func rewriteValueRISCV64_OpRsh64Ux32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux64 x y) // result: (AND (SRL x y) (Neg64 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg(x) @@ -4524,14 +4849,16 @@ func rewriteValueRISCV64_OpRsh64Ux64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg(x) @@ -4549,14 +4876,16 @@ func rewriteValueRISCV64_OpRsh64Ux8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v.AddArg(x) @@ -4576,14 +4905,16 @@ func rewriteValueRISCV64_OpRsh64x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 x y) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v.AddArg(x) @@ -4603,13 +4934,15 @@ func rewriteValueRISCV64_OpRsh64x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 x y) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v.AddArg(x) @@ -4627,14 +4960,16 @@ func rewriteValueRISCV64_OpRsh64x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v.AddArg(x) @@ -4654,14 +4989,16 @@ func rewriteValueRISCV64_OpRsh64x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) 
@@ -4681,14 +5018,16 @@ func rewriteValueRISCV64_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -4708,14 +5047,16 @@ func rewriteValueRISCV64_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -4733,14 +5074,16 @@ func rewriteValueRISCV64_OpRsh8Ux64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -4760,14 +5103,16 @@ func rewriteValueRISCV64_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -4789,14 +5134,16 @@ func rewriteValueRISCV64_OpRsh8x16_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -4818,14 +5165,16 @@ func rewriteValueRISCV64_OpRsh8x32_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -4845,14 +5194,16 @@ func rewriteValueRISCV64_OpRsh8x64_0(v *Value) bool { } } func rewriteValueRISCV64_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -4874,12 +5225,13 @@ func rewriteValueRISCV64_OpRsh8x8_0(v *Value) bool { } } func rewriteValueRISCV64_OpSignExt16to32_0(v 
*Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SignExt16to32 x) // result: (SRAI [48] (SLLI [48] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRAI) v.AuxInt = 48 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -4890,12 +5242,13 @@ func rewriteValueRISCV64_OpSignExt16to32_0(v *Value) bool { } } func rewriteValueRISCV64_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SignExt16to64 x) // result: (SRAI [48] (SLLI [48] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRAI) v.AuxInt = 48 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -4906,12 +5259,13 @@ func rewriteValueRISCV64_OpSignExt16to64_0(v *Value) bool { } } func rewriteValueRISCV64_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SignExt32to64 x) // result: (SRAI [32] (SLLI [32] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRAI) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -4922,12 +5276,13 @@ func rewriteValueRISCV64_OpSignExt32to64_0(v *Value) bool { } } func rewriteValueRISCV64_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SignExt8to16 x) // result: (SRAI [56] (SLLI [56] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRAI) v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -4938,12 +5293,13 @@ func rewriteValueRISCV64_OpSignExt8to16_0(v *Value) bool { } } func rewriteValueRISCV64_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SignExt8to32 x) // result: (SRAI [56] (SLLI [56] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRAI) v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -4954,12 +5310,13 @@ func rewriteValueRISCV64_OpSignExt8to32_0(v *Value) bool { } } func rewriteValueRISCV64_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SignExt8to64 x) // result: (SRAI [56] (SLLI [56] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRAI) v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -4970,13 +5327,14 @@ func rewriteValueRISCV64_OpSignExt8to64_0(v *Value) bool { } } func rewriteValueRISCV64_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Slicemask x) // result: (XOR (MOVDconst [-1]) (SRA (SUB x (MOVDconst [1])) (MOVDconst [63]))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64XOR) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) v0.AuxInt = -1 @@ -4996,22 +5354,24 @@ func rewriteValueRISCV64_OpSlicemask_0(v *Value) bool { } } func rewriteValueRISCV64_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (FSQRTD x) for { - x := v.Args[0] + x := v_0 v.reset(OpRISCV64FSQRTD) v.AddArg(x) return true } } func rewriteValueRISCV64_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpRISCV64CALLstatic) v.AuxInt = argwid v.Aux = target @@ -5020,14 +5380,17 @@ func rewriteValueRISCV64_OpStaticCall_0(v *Value) bool { } } func rewriteValueRISCV64_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 1 // result: (MOVBstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if 
!(t.(*types.Type).Size() == 1) { break } @@ -5042,9 +5405,9 @@ func rewriteValueRISCV64_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -5059,9 +5422,9 @@ func rewriteValueRISCV64_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { break } @@ -5076,9 +5439,9 @@ func rewriteValueRISCV64_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { break } @@ -5093,9 +5456,9 @@ func rewriteValueRISCV64_OpStore_0(v *Value) bool { // result: (FMOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -5110,9 +5473,9 @@ func rewriteValueRISCV64_OpStore_0(v *Value) bool { // result: (FMOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -5125,11 +5488,13 @@ func rewriteValueRISCV64_OpStore_0(v *Value) bool { return false } func rewriteValueRISCV64_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SUB) v.AddArg(x) v.AddArg(y) @@ -5137,11 +5502,13 @@ func rewriteValueRISCV64_OpSub16_0(v *Value) bool { } } func rewriteValueRISCV64_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SUB) v.AddArg(x) v.AddArg(y) @@ -5149,11 +5516,13 @@ func rewriteValueRISCV64_OpSub32_0(v *Value) bool { } } func rewriteValueRISCV64_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (FSUBS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FSUBS) v.AddArg(x) v.AddArg(y) @@ -5161,11 +5530,13 @@ func rewriteValueRISCV64_OpSub32F_0(v *Value) bool { } } func rewriteValueRISCV64_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SUB) v.AddArg(x) v.AddArg(y) @@ -5173,11 +5544,13 @@ func rewriteValueRISCV64_OpSub64_0(v *Value) bool { } } func rewriteValueRISCV64_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (FSUBD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64FSUBD) v.AddArg(x) v.AddArg(y) @@ -5185,11 +5558,13 @@ func rewriteValueRISCV64_OpSub64F_0(v *Value) bool { } } func rewriteValueRISCV64_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SUB) v.AddArg(x) v.AddArg(y) @@ -5197,11 +5572,13 @@ func rewriteValueRISCV64_OpSub8_0(v *Value) bool { } } func rewriteValueRISCV64_OpSubPtr_0(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64SUB) v.AddArg(x) v.AddArg(y) @@ -5209,10 +5586,11 @@ func rewriteValueRISCV64_OpSubPtr_0(v *Value) bool { } } func rewriteValueRISCV64_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5220,10 +5598,11 @@ func rewriteValueRISCV64_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueRISCV64_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5231,10 +5610,11 @@ func rewriteValueRISCV64_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueRISCV64_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5242,10 +5622,11 @@ func rewriteValueRISCV64_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueRISCV64_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5253,10 +5634,11 @@ func rewriteValueRISCV64_OpTrunc64to16_0(v *Value) bool { } } func rewriteValueRISCV64_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5264,10 +5646,11 @@ func rewriteValueRISCV64_OpTrunc64to32_0(v *Value) bool { } } func rewriteValueRISCV64_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5275,13 +5658,16 @@ func rewriteValueRISCV64_OpTrunc64to8_0(v *Value) bool { } } func rewriteValueRISCV64_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpRISCV64LoweredWB) v.Aux = fn v.AddArg(destptr) @@ -5291,11 +5677,13 @@ func rewriteValueRISCV64_OpWB_0(v *Value) bool { } } func rewriteValueRISCV64_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64XOR) v.AddArg(x) v.AddArg(y) @@ -5303,11 +5691,13 @@ func rewriteValueRISCV64_OpXor16_0(v *Value) bool { } } func rewriteValueRISCV64_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64XOR) v.AddArg(x) v.AddArg(y) @@ -5315,11 +5705,13 @@ func rewriteValueRISCV64_OpXor32_0(v *Value) bool { } } func rewriteValueRISCV64_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor64 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRISCV64XOR) v.AddArg(x) v.AddArg(y) @@ -5327,11 +5719,13 @@ func rewriteValueRISCV64_OpXor64_0(v *Value) bool { } } func rewriteValueRISCV64_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := 
v_1 v.reset(OpRISCV64XOR) v.AddArg(x) v.AddArg(y) @@ -5339,6 +5733,8 @@ func rewriteValueRISCV64_OpXor8_0(v *Value) bool { } } func rewriteValueRISCV64_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -5348,7 +5744,7 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -5360,8 +5756,8 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpRISCV64MOVBstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) @@ -5375,8 +5771,8 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpRISCV64MOVHstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) @@ -5390,8 +5786,8 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpRISCV64MOVWstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) @@ -5405,8 +5801,8 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpRISCV64MOVDstore) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) @@ -5419,8 +5815,8 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { for { s := v.AuxInt t := v.Aux - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpRISCV64LoweredZero) v.AuxInt = t.(*types.Type).Alignment() v.AddArg(ptr) @@ -5435,12 +5831,13 @@ func rewriteValueRISCV64_OpZero_0(v *Value) bool { } } func rewriteValueRISCV64_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (ZeroExt16to32 x) // result: (SRLI [48] (SLLI [48] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRLI) v.AuxInt = 48 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -5451,12 +5848,13 @@ func rewriteValueRISCV64_OpZeroExt16to32_0(v *Value) bool { } } func rewriteValueRISCV64_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (ZeroExt16to64 x) // result: (SRLI [48] (SLLI [48] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRLI) v.AuxInt = 48 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -5467,12 +5865,13 @@ func rewriteValueRISCV64_OpZeroExt16to64_0(v *Value) bool { } } func rewriteValueRISCV64_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (ZeroExt32to64 x) // result: (SRLI [32] (SLLI [32] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRLI) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -5483,12 +5882,13 @@ func rewriteValueRISCV64_OpZeroExt32to64_0(v *Value) bool { } } func rewriteValueRISCV64_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (ZeroExt8to16 x) // result: (SRLI [56] (SLLI [56] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRLI) v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -5499,12 +5899,13 @@ func rewriteValueRISCV64_OpZeroExt8to16_0(v *Value) bool { } } func rewriteValueRISCV64_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (ZeroExt8to32 x) // result: (SRLI [56] (SLLI [56] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRLI) 
v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) @@ -5515,12 +5916,13 @@ func rewriteValueRISCV64_OpZeroExt8to32_0(v *Value) bool { } } func rewriteValueRISCV64_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (ZeroExt8to64 x) // result: (SRLI [56] (SLLI [56] x)) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpRISCV64SRLI) v.AuxInt = 56 v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index b04e1439bf..d2b917204b 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -812,11 +812,13 @@ func rewriteValueS390X(v *Value) bool { return false } func rewriteValueS390X_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add16 x y) // result: (ADDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XADDW) v.AddArg(x) v.AddArg(y) @@ -824,11 +826,13 @@ func rewriteValueS390X_OpAdd16_0(v *Value) bool { } } func rewriteValueS390X_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32 x y) // result: (ADDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XADDW) v.AddArg(x) v.AddArg(y) @@ -836,11 +840,13 @@ func rewriteValueS390X_OpAdd32_0(v *Value) bool { } } func rewriteValueS390X_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F x y) // result: (FADDS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFADDS) v.AddArg(x) v.AddArg(y) @@ -848,11 +854,13 @@ func rewriteValueS390X_OpAdd32F_0(v *Value) bool { } } func rewriteValueS390X_OpAdd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64 x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XADD) v.AddArg(x) v.AddArg(y) @@ -860,11 +868,13 @@ func rewriteValueS390X_OpAdd64_0(v *Value) bool { } } func rewriteValueS390X_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F x y) // result: (FADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFADD) v.AddArg(x) v.AddArg(y) @@ -872,11 +882,13 @@ func rewriteValueS390X_OpAdd64F_0(v *Value) bool { } } func rewriteValueS390X_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add8 x y) // result: (ADDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XADDW) v.AddArg(x) v.AddArg(y) @@ -884,11 +896,13 @@ func rewriteValueS390X_OpAdd8_0(v *Value) bool { } } func rewriteValueS390X_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x y) // result: (ADD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XADD) v.AddArg(x) v.AddArg(y) @@ -896,11 +910,12 @@ func rewriteValueS390X_OpAddPtr_0(v *Value) bool { } } func rewriteValueS390X_OpAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (Addr {sym} base) // result: (MOVDaddr {sym} base) for { sym := v.Aux - base := v.Args[0] + base := v_0 v.reset(OpS390XMOVDaddr) v.Aux = sym v.AddArg(base) @@ -908,11 +923,13 @@ func rewriteValueS390X_OpAddr_0(v *Value) bool { } } func rewriteValueS390X_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And16 x y) // result: (ANDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XANDW) v.AddArg(x) v.AddArg(y) @@ -920,11 +937,13 @@ func 
rewriteValueS390X_OpAnd16_0(v *Value) bool { } } func rewriteValueS390X_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And32 x y) // result: (ANDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XANDW) v.AddArg(x) v.AddArg(y) @@ -932,11 +951,13 @@ func rewriteValueS390X_OpAnd32_0(v *Value) bool { } } func rewriteValueS390X_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And64 x y) // result: (AND x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XAND) v.AddArg(x) v.AddArg(y) @@ -944,11 +965,13 @@ func rewriteValueS390X_OpAnd64_0(v *Value) bool { } } func rewriteValueS390X_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (ANDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XANDW) v.AddArg(x) v.AddArg(y) @@ -956,11 +979,13 @@ func rewriteValueS390X_OpAnd8_0(v *Value) bool { } } func rewriteValueS390X_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (ANDW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XANDW) v.AddArg(x) v.AddArg(y) @@ -968,14 +993,17 @@ func rewriteValueS390X_OpAndB_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicAdd32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicAdd32 ptr val mem) // result: (AddTupleFirst32 val (LAA ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XAddTupleFirst32) v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem)) @@ -987,14 +1015,17 @@ func rewriteValueS390X_OpAtomicAdd32_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicAdd64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicAdd64 ptr val mem) // result: (AddTupleFirst64 val (LAAG ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XAddTupleFirst64) v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem)) @@ -1006,14 +1037,17 @@ func rewriteValueS390X_OpAtomicAdd64_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicAnd8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicAnd8 ptr val mem) // result: (LANfloor ptr (RLL (ORWconst val [-1<<8]) (RXSBG {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XLANfloor) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32) @@ -1034,13 +1068,17 @@ func rewriteValueS390X_OpAtomicAnd8_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicCompareAndSwap32_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap32 ptr old new_ mem) // result: (LoweredAtomicCas32 ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpS390XLoweredAtomicCas32) v.AddArg(ptr) v.AddArg(old) @@ -1050,13 +1088,17 @@ func rewriteValueS390X_OpAtomicCompareAndSwap32_0(v *Value) bool { } } func 
rewriteValueS390X_OpAtomicCompareAndSwap64_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicCompareAndSwap64 ptr old new_ mem) // result: (LoweredAtomicCas64 ptr old new_ mem) for { - mem := v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 v.reset(OpS390XLoweredAtomicCas64) v.AddArg(ptr) v.AddArg(old) @@ -1066,12 +1108,15 @@ func rewriteValueS390X_OpAtomicCompareAndSwap64_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicExchange32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange32 ptr val mem) // result: (LoweredAtomicExchange32 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XLoweredAtomicExchange32) v.AddArg(ptr) v.AddArg(val) @@ -1080,12 +1125,15 @@ func rewriteValueS390X_OpAtomicExchange32_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicExchange64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicExchange64 ptr val mem) // result: (LoweredAtomicExchange64 ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XLoweredAtomicExchange64) v.AddArg(ptr) v.AddArg(val) @@ -1094,11 +1142,13 @@ func rewriteValueS390X_OpAtomicExchange64_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicLoad32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad32 ptr mem) // result: (MOVWZatomicload ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpS390XMOVWZatomicload) v.AddArg(ptr) v.AddArg(mem) @@ -1106,11 +1156,13 @@ func rewriteValueS390X_OpAtomicLoad32_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicLoad64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad64 ptr mem) // result: (MOVDatomicload ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpS390XMOVDatomicload) v.AddArg(ptr) v.AddArg(mem) @@ -1118,11 +1170,13 @@ func rewriteValueS390X_OpAtomicLoad64_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicLoad8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoad8 ptr mem) // result: (MOVBZatomicload ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpS390XMOVBZatomicload) v.AddArg(ptr) v.AddArg(mem) @@ -1130,11 +1184,13 @@ func rewriteValueS390X_OpAtomicLoad8_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicLoadAcq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadAcq32 ptr mem) // result: (MOVWZatomicload ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpS390XMOVWZatomicload) v.AddArg(ptr) v.AddArg(mem) @@ -1142,11 +1198,13 @@ func rewriteValueS390X_OpAtomicLoadAcq32_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicLoadPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicLoadPtr ptr mem) // result: (MOVDatomicload ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpS390XMOVDatomicload) v.AddArg(ptr) v.AddArg(mem) @@ -1154,14 +1212,17 @@ func rewriteValueS390X_OpAtomicLoadPtr_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicOr8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AtomicOr8 ptr val 
mem) // result: (LAOfloor ptr (SLW (MOVBZreg val) (RXSBG {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XLAOfloor) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32) @@ -1181,13 +1242,16 @@ func rewriteValueS390X_OpAtomicOr8_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicStore32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (AtomicStore32 ptr val mem) // result: (SYNC (MOVWatomicstore ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem) v0.AddArg(ptr) @@ -1198,13 +1262,16 @@ func rewriteValueS390X_OpAtomicStore32_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicStore64_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (AtomicStore64 ptr val mem) // result: (SYNC (MOVDatomicstore ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) v0.AddArg(ptr) @@ -1215,13 +1282,16 @@ func rewriteValueS390X_OpAtomicStore64_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicStore8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (AtomicStore8 ptr val mem) // result: (SYNC (MOVBatomicstore ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem) v0.AddArg(ptr) @@ -1232,13 +1302,16 @@ func rewriteValueS390X_OpAtomicStore8_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicStorePtrNoWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (AtomicStorePtrNoWB ptr val mem) // result: (SYNC (MOVDatomicstore ptr val mem)) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XSYNC) v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) v0.AddArg(ptr) @@ -1249,12 +1322,15 @@ func rewriteValueS390X_OpAtomicStorePtrNoWB_0(v *Value) bool { } } func rewriteValueS390X_OpAtomicStoreRel32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AtomicStoreRel32 ptr val mem) // result: (MOVWatomicstore ptr val mem) for { - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 v.reset(OpS390XMOVWatomicstore) v.AddArg(ptr) v.AddArg(val) @@ -1263,13 +1339,15 @@ func rewriteValueS390X_OpAtomicStoreRel32_0(v *Value) bool { } } func rewriteValueS390X_OpAvg64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Avg64u x y) // result: (ADD (SRDconst (SUB x y) [1]) y) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XADD) v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t) v0.AuxInt = 1 @@ -1283,12 +1361,13 @@ func rewriteValueS390X_OpAvg64u_0(v *Value) bool { } } func rewriteValueS390X_OpBitLen64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) // result: (SUB (MOVDconst [64]) (FLOGR x)) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) 
v0.AuxInt = 64 @@ -1300,30 +1379,33 @@ func rewriteValueS390X_OpBitLen64_0(v *Value) bool { } } func rewriteValueS390X_OpBswap32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Bswap32 x) // result: (MOVWBR x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVWBR) v.AddArg(x) return true } } func rewriteValueS390X_OpBswap64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Bswap64 x) // result: (MOVDBR x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVDBR) v.AddArg(x) return true } } func rewriteValueS390X_OpCeil_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ceil x) // result: (FIDBR [6] x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFIDBR) v.AuxInt = 6 v.AddArg(x) @@ -1331,13 +1413,16 @@ func rewriteValueS390X_OpCeil_0(v *Value) bool { } } func rewriteValueS390X_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpS390XCALLclosure) v.AuxInt = argwid v.AddArg(entry) @@ -1347,40 +1432,44 @@ func rewriteValueS390X_OpClosureCall_0(v *Value) bool { } } func rewriteValueS390X_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 x) // result: (NOTW x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNOTW) v.AddArg(x) return true } } func rewriteValueS390X_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 x) // result: (NOTW x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNOTW) v.AddArg(x) return true } } func rewriteValueS390X_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com64 x) // result: (NOT x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNOT) v.AddArg(x) return true } } func rewriteValueS390X_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com8 x) // result: (NOTW x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNOTW) v.AddArg(x) return true @@ -1466,13 +1555,14 @@ func rewriteValueS390X_OpConstNil_0(v *Value) bool { } } func rewriteValueS390X_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz32 x) // result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW (SUBWconst [1] x) (NOTW x))))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 @@ -1494,23 +1584,25 @@ func rewriteValueS390X_OpCtz32_0(v *Value) bool { } } func rewriteValueS390X_OpCtz32NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz32NonZero x) // result: (Ctz32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz32) v.AddArg(x) return true } } func rewriteValueS390X_OpCtz64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz64 x) // result: (SUB (MOVDconst [64]) (FLOGR (AND (SUBconst [1] x) (NOT x)))) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpS390XSUB) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = 64 @@ -1530,123 +1622,136 @@ func rewriteValueS390X_OpCtz64_0(v *Value) bool { } } func rewriteValueS390X_OpCtz64NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz64NonZero x) // result: (Ctz64 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz64) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (CFEBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCFEBRA) v.AddArg(x) return 
true } } func rewriteValueS390X_OpCvt32Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64 x) // result: (CGEBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCGEBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (LDEBR x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XLDEBR) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to32F x) // result: (CEFBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCEFBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32to64F x) // result: (CDFBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCDFBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (CFDBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCFDBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (LEDBR x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XLEDBR) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt64Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64 x) // result: (CGDBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCGDBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt64to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to32F x) // result: (CEGBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCEGBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpCvt64to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to64F x) // result: (CDGBRA x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XCDGBRA) v.AddArg(x) return true } } func rewriteValueS390X_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (DIVW (MOVHreg x) (MOVHreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) @@ -1658,13 +1763,15 @@ func rewriteValueS390X_OpDiv16_0(v *Value) bool { } } func rewriteValueS390X_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (DIVWU (MOVHZreg x) (MOVHZreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) @@ -1676,13 +1783,15 @@ func rewriteValueS390X_OpDiv16u_0(v *Value) bool { } } func rewriteValueS390X_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32 x y) // result: (DIVW (MOVWreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v0.AddArg(x) @@ -1692,11 +1801,13 @@ func rewriteValueS390X_OpDiv32_0(v *Value) bool { } } func rewriteValueS390X_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (FDIVS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFDIVS) v.AddArg(x) v.AddArg(y) @@ -1704,13 +1815,15 @@ func rewriteValueS390X_OpDiv32F_0(v *Value) bool { } } func rewriteValueS390X_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types // match: (Div32u x y) // result: (DIVWU (MOVWZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v0.AddArg(x) @@ -1720,11 +1833,13 @@ func rewriteValueS390X_OpDiv32u_0(v *Value) bool { } } func rewriteValueS390X_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64 x y) // result: (DIVD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVD) v.AddArg(x) v.AddArg(y) @@ -1732,11 +1847,13 @@ func rewriteValueS390X_OpDiv64_0(v *Value) bool { } } func rewriteValueS390X_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (FDIV x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFDIV) v.AddArg(x) v.AddArg(y) @@ -1744,11 +1861,13 @@ func rewriteValueS390X_OpDiv64F_0(v *Value) bool { } } func rewriteValueS390X_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64u x y) // result: (DIVDU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVDU) v.AddArg(x) v.AddArg(y) @@ -1756,13 +1875,15 @@ func rewriteValueS390X_OpDiv64u_0(v *Value) bool { } } func rewriteValueS390X_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (DIVW (MOVBreg x) (MOVBreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) @@ -1774,13 +1895,15 @@ func rewriteValueS390X_OpDiv8_0(v *Value) bool { } } func rewriteValueS390X_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (DIVWU (MOVBZreg x) (MOVBZreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XDIVWU) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) @@ -1792,13 +1915,15 @@ func rewriteValueS390X_OpDiv8u_0(v *Value) bool { } } func rewriteValueS390X_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1819,13 +1944,15 @@ func rewriteValueS390X_OpEq16_0(v *Value) bool { } } func rewriteValueS390X_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1842,13 +1969,15 @@ func rewriteValueS390X_OpEq32_0(v *Value) bool { } } func rewriteValueS390X_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32F x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1865,13 +1994,15 @@ func rewriteValueS390X_OpEq32F_0(v *Value) bool { } } func rewriteValueS390X_OpEq64_0(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq64 x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1888,13 +2019,15 @@ func rewriteValueS390X_OpEq64_0(v *Value) bool { } } func rewriteValueS390X_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq64F x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1911,13 +2044,15 @@ func rewriteValueS390X_OpEq64F_0(v *Value) bool { } } func rewriteValueS390X_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1938,13 +2073,15 @@ func rewriteValueS390X_OpEq8_0(v *Value) bool { } } func rewriteValueS390X_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1965,13 +2102,15 @@ func rewriteValueS390X_OpEqB_0(v *Value) bool { } } func rewriteValueS390X_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqPtr x y) // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Equal v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -1988,12 +2127,15 @@ func rewriteValueS390X_OpEqPtr_0(v *Value) bool { } } func rewriteValueS390X_OpFMA_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMA x y z) // result: (FMADD z x y) for { - z := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + z := v_2 v.reset(OpS390XFMADD) v.AddArg(z) v.AddArg(x) @@ -2002,10 +2144,11 @@ func rewriteValueS390X_OpFMA_0(v *Value) bool { } } func rewriteValueS390X_OpFloor_0(v *Value) bool { + v_0 := v.Args[0] // match: (Floor x) // result: (FIDBR [7] x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFIDBR) v.AuxInt = 7 v.AddArg(x) @@ -2013,13 +2156,15 @@ func rewriteValueS390X_OpFloor_0(v *Value) bool { } } func rewriteValueS390X_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2040,13 +2185,15 @@ func rewriteValueS390X_OpGeq16_0(v *Value) bool { } } func rewriteValueS390X_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ 
:= &b.Func.Config.Types // match: (Geq16U x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2067,13 +2214,15 @@ func rewriteValueS390X_OpGeq16U_0(v *Value) bool { } } func rewriteValueS390X_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32 x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2090,13 +2239,15 @@ func rewriteValueS390X_OpGeq32_0(v *Value) bool { } } func rewriteValueS390X_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32F x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2113,13 +2264,15 @@ func rewriteValueS390X_OpGeq32F_0(v *Value) bool { } } func rewriteValueS390X_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32U x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2136,13 +2289,15 @@ func rewriteValueS390X_OpGeq32U_0(v *Value) bool { } } func rewriteValueS390X_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64 x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2159,13 +2314,15 @@ func rewriteValueS390X_OpGeq64_0(v *Value) bool { } } func rewriteValueS390X_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64F x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2182,13 +2339,15 @@ func rewriteValueS390X_OpGeq64F_0(v *Value) bool { } } func rewriteValueS390X_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64U x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2205,13 +2364,15 @@ func rewriteValueS390X_OpGeq64U_0(v *Value) bool { } } func rewriteValueS390X_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) 
(MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2232,13 +2393,15 @@ func rewriteValueS390X_OpGeq8_0(v *Value) bool { } } func rewriteValueS390X_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (LOCGR {s390x.GreaterOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.GreaterOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2283,23 +2446,26 @@ func rewriteValueS390X_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueS390X_OpGetG_0(v *Value) bool { + v_0 := v.Args[0] // match: (GetG mem) // result: (LoweredGetG mem) for { - mem := v.Args[0] + mem := v_0 v.reset(OpS390XLoweredGetG) v.AddArg(mem) return true } } func rewriteValueS390X_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2320,13 +2486,15 @@ func rewriteValueS390X_OpGreater16_0(v *Value) bool { } } func rewriteValueS390X_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2347,13 +2515,15 @@ func rewriteValueS390X_OpGreater16U_0(v *Value) bool { } } func rewriteValueS390X_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32 x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2370,13 +2540,15 @@ func rewriteValueS390X_OpGreater32_0(v *Value) bool { } } func rewriteValueS390X_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32F x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2393,13 +2565,15 @@ func rewriteValueS390X_OpGreater32F_0(v *Value) bool { } } func rewriteValueS390X_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32U x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2416,13 +2590,15 @@ func rewriteValueS390X_OpGreater32U_0(v *Value) bool { } } func rewriteValueS390X_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types // match: (Greater64 x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2439,13 +2615,15 @@ func rewriteValueS390X_OpGreater64_0(v *Value) bool { } } func rewriteValueS390X_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater64F x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2462,13 +2640,15 @@ func rewriteValueS390X_OpGreater64F_0(v *Value) bool { } } func rewriteValueS390X_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater64U x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2485,13 +2665,15 @@ func rewriteValueS390X_OpGreater64U_0(v *Value) bool { } } func rewriteValueS390X_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2512,13 +2694,15 @@ func rewriteValueS390X_OpGreater8_0(v *Value) bool { } } func rewriteValueS390X_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (LOCGR {s390x.Greater} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Greater v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2539,13 +2723,15 @@ func rewriteValueS390X_OpGreater8U_0(v *Value) bool { } } func rewriteValueS390X_OpHmul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32 x y) // result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRDconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) @@ -2560,13 +2746,15 @@ func rewriteValueS390X_OpHmul32_0(v *Value) bool { } } func rewriteValueS390X_OpHmul32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Hmul32u x y) // result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRDconst) v.AuxInt = 32 v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) @@ -2581,11 +2769,13 @@ func rewriteValueS390X_OpHmul32u_0(v *Value) bool { } } func rewriteValueS390X_OpHmul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64 x y) // result: (MULHD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMULHD) v.AddArg(x) v.AddArg(y) @@ -2593,11 +2783,13 @@ func rewriteValueS390X_OpHmul64_0(v *Value) bool { } } func 
rewriteValueS390X_OpHmul64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Hmul64u x y) // result: (MULHDU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMULHDU) v.AddArg(x) v.AddArg(y) @@ -2605,10 +2797,10 @@ func rewriteValueS390X_OpHmul64u_0(v *Value) bool { } } func rewriteValueS390X_OpITab_0(v *Value) bool { + v_0 := v.Args[0] // match: (ITab (Load ptr mem)) // result: (MOVDload ptr mem) for { - v_0 := v.Args[0] if v_0.Op != OpLoad { break } @@ -2622,12 +2814,14 @@ func rewriteValueS390X_OpITab_0(v *Value) bool { return false } func rewriteValueS390X_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (CALLinter [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpS390XCALLinter) v.AuxInt = argwid v.AddArg(entry) @@ -2636,13 +2830,15 @@ func rewriteValueS390X_OpInterCall_0(v *Value) bool { } } func rewriteValueS390X_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsInBounds idx len) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2659,12 +2855,13 @@ func rewriteValueS390X_OpIsInBounds_0(v *Value) bool { } } func rewriteValueS390X_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsNonNil p) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0])) for { - p := v.Args[0] + p := v_0 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2681,13 +2878,15 @@ func rewriteValueS390X_OpIsNonNil_0(v *Value) bool { } } func rewriteValueS390X_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsSliceInBounds idx len) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2704,13 +2903,15 @@ func rewriteValueS390X_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueS390X_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2731,13 +2932,15 @@ func rewriteValueS390X_OpLeq16_0(v *Value) bool { } } func rewriteValueS390X_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2758,13 +2961,15 @@ func rewriteValueS390X_OpLeq16U_0(v *Value) bool { } } func rewriteValueS390X_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block 
typ := &b.Func.Config.Types // match: (Leq32 x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2781,13 +2986,15 @@ func rewriteValueS390X_OpLeq32_0(v *Value) bool { } } func rewriteValueS390X_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32F x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2804,13 +3011,15 @@ func rewriteValueS390X_OpLeq32F_0(v *Value) bool { } } func rewriteValueS390X_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32U x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2827,13 +3036,15 @@ func rewriteValueS390X_OpLeq32U_0(v *Value) bool { } } func rewriteValueS390X_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64 x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2850,13 +3061,15 @@ func rewriteValueS390X_OpLeq64_0(v *Value) bool { } } func rewriteValueS390X_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64F x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2873,13 +3086,15 @@ func rewriteValueS390X_OpLeq64F_0(v *Value) bool { } } func rewriteValueS390X_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64U x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2896,13 +3111,15 @@ func rewriteValueS390X_OpLeq64U_0(v *Value) bool { } } func rewriteValueS390X_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2923,13 +3140,15 @@ func rewriteValueS390X_OpLeq8_0(v *Value) bool { } } func rewriteValueS390X_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y))) for { - y := v.Args[1] - x := 
v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.LessOrEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2950,13 +3169,15 @@ func rewriteValueS390X_OpLeq8U_0(v *Value) bool { } } func rewriteValueS390X_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -2977,13 +3198,15 @@ func rewriteValueS390X_OpLess16_0(v *Value) bool { } } func rewriteValueS390X_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3004,13 +3227,15 @@ func rewriteValueS390X_OpLess16U_0(v *Value) bool { } } func rewriteValueS390X_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32 x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3027,13 +3252,15 @@ func rewriteValueS390X_OpLess32_0(v *Value) bool { } } func rewriteValueS390X_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32F x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3050,13 +3277,15 @@ func rewriteValueS390X_OpLess32F_0(v *Value) bool { } } func rewriteValueS390X_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32U x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3073,13 +3302,15 @@ func rewriteValueS390X_OpLess32U_0(v *Value) bool { } } func rewriteValueS390X_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less64 x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3096,13 +3327,15 @@ func rewriteValueS390X_OpLess64_0(v *Value) bool { } } func rewriteValueS390X_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less64F x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3119,13 +3352,15 @@ func rewriteValueS390X_OpLess64F_0(v *Value) bool { } } func 
rewriteValueS390X_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less64U x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3142,13 +3377,15 @@ func rewriteValueS390X_OpLess64U_0(v *Value) bool { } } func rewriteValueS390X_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3169,13 +3406,15 @@ func rewriteValueS390X_OpLess8_0(v *Value) bool { } } func rewriteValueS390X_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.Less v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -3196,13 +3435,15 @@ func rewriteValueS390X_OpLess8U_0(v *Value) bool { } } func rewriteValueS390X_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: (is64BitInt(t) || isPtr(t)) // result: (MOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) || isPtr(t)) { break } @@ -3216,8 +3457,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (MOVWload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && isSigned(t)) { break } @@ -3231,8 +3472,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (MOVWZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitInt(t) && !isSigned(t)) { break } @@ -3246,8 +3487,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (MOVHload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && isSigned(t)) { break } @@ -3261,8 +3502,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (MOVHZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is16BitInt(t) && !isSigned(t)) { break } @@ -3276,8 +3517,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (MOVBload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is8BitInt(t) && isSigned(t)) { break } @@ -3291,8 +3532,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (MOVBZload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) { break } @@ -3306,8 +3547,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (FMOVSload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -3321,8 +3562,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { // result: (FMOVDload ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { 
break } @@ -3334,12 +3575,12 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool { return false } func rewriteValueS390X_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (MOVDaddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpS390XMOVDaddr) v.Aux = sym v.AddArg(base) @@ -3347,14 +3588,16 @@ func rewriteValueS390X_OpLocalAddr_0(v *Value) bool { } } func rewriteValueS390X_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3367,8 +3610,8 @@ func rewriteValueS390X_OpLsh16x16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3389,14 +3632,16 @@ func rewriteValueS390X_OpLsh16x16_0(v *Value) bool { } } func rewriteValueS390X_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3409,8 +3654,8 @@ func rewriteValueS390X_OpLsh16x32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3429,14 +3674,16 @@ func rewriteValueS390X_OpLsh16x32_0(v *Value) bool { } } func rewriteValueS390X_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x64 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3449,8 +3696,8 @@ func rewriteValueS390X_OpLsh16x64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3469,14 +3716,16 @@ func rewriteValueS390X_OpLsh16x64_0(v *Value) bool { } } func rewriteValueS390X_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3489,8 +3738,8 @@ func rewriteValueS390X_OpLsh16x8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3511,14 +3760,16 @@ func rewriteValueS390X_OpLsh16x8_0(v *Value) bool { } } func rewriteValueS390X_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3531,8 +3782,8 @@ func rewriteValueS390X_OpLsh32x16_0(v *Value) bool 
{ // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3553,14 +3804,16 @@ func rewriteValueS390X_OpLsh32x16_0(v *Value) bool { } } func rewriteValueS390X_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3573,8 +3826,8 @@ func rewriteValueS390X_OpLsh32x32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3593,14 +3846,16 @@ func rewriteValueS390X_OpLsh32x32_0(v *Value) bool { } } func rewriteValueS390X_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x64 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3613,8 +3868,8 @@ func rewriteValueS390X_OpLsh32x64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3633,14 +3888,16 @@ func rewriteValueS390X_OpLsh32x64_0(v *Value) bool { } } func rewriteValueS390X_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3653,8 +3910,8 @@ func rewriteValueS390X_OpLsh32x8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3675,14 +3932,16 @@ func rewriteValueS390X_OpLsh32x8_0(v *Value) bool { } } func rewriteValueS390X_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3695,8 +3954,8 @@ func rewriteValueS390X_OpLsh64x16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3717,14 +3976,16 @@ func rewriteValueS390X_OpLsh64x16_0(v *Value) bool { } } func rewriteValueS390X_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3737,8 +3998,8 @@ func rewriteValueS390X_OpLsh64x32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := 
v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3757,14 +4018,16 @@ func rewriteValueS390X_OpLsh64x32_0(v *Value) bool { } } func rewriteValueS390X_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x64 x y) // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3777,8 +4040,8 @@ func rewriteValueS390X_OpLsh64x64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3797,14 +4060,16 @@ func rewriteValueS390X_OpLsh64x64_0(v *Value) bool { } } func rewriteValueS390X_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) // cond: shiftIsBounded(v) // result: (SLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3817,8 +4082,8 @@ func rewriteValueS390X_OpLsh64x8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3839,14 +4104,16 @@ func rewriteValueS390X_OpLsh64x8_0(v *Value) bool { } } func rewriteValueS390X_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3859,8 +4126,8 @@ func rewriteValueS390X_OpLsh8x16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3881,14 +4148,16 @@ func rewriteValueS390X_OpLsh8x16_0(v *Value) bool { } } func rewriteValueS390X_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3901,8 +4170,8 @@ func rewriteValueS390X_OpLsh8x32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3921,14 +4190,16 @@ func rewriteValueS390X_OpLsh8x32_0(v *Value) bool { } } func rewriteValueS390X_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x64 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3941,8 +4212,8 @@ func rewriteValueS390X_OpLsh8x64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -3961,14 +4232,16 @@ func 
rewriteValueS390X_OpLsh8x64_0(v *Value) bool { } } func rewriteValueS390X_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // cond: shiftIsBounded(v) // result: (SLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -3981,8 +4254,8 @@ func rewriteValueS390X_OpLsh8x8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -4003,13 +4276,15 @@ func rewriteValueS390X_OpLsh8x8_0(v *Value) bool { } } func rewriteValueS390X_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (MODW (MOVHreg x) (MOVHreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) @@ -4021,13 +4296,15 @@ func rewriteValueS390X_OpMod16_0(v *Value) bool { } } func rewriteValueS390X_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (MODWU (MOVHZreg x) (MOVHZreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) v0.AddArg(x) @@ -4039,13 +4316,15 @@ func rewriteValueS390X_OpMod16u_0(v *Value) bool { } } func rewriteValueS390X_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) // result: (MODW (MOVWreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) v0.AddArg(x) @@ -4055,13 +4334,15 @@ func rewriteValueS390X_OpMod32_0(v *Value) bool { } } func rewriteValueS390X_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (MODWU (MOVWZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) v0.AddArg(x) @@ -4071,11 +4352,13 @@ func rewriteValueS390X_OpMod32u_0(v *Value) bool { } } func rewriteValueS390X_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64 x y) // result: (MODD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODD) v.AddArg(x) v.AddArg(y) @@ -4083,11 +4366,13 @@ func rewriteValueS390X_OpMod64_0(v *Value) bool { } } func rewriteValueS390X_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64u x y) // result: (MODDU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODDU) v.AddArg(x) v.AddArg(y) @@ -4095,13 +4380,15 @@ func rewriteValueS390X_OpMod64u_0(v *Value) bool { } } func rewriteValueS390X_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (MODW (MOVBreg x) (MOVBreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) @@ -4113,13 +4400,15 @@ func rewriteValueS390X_OpMod8_0(v *Value) bool { } } func rewriteValueS390X_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b 
:= v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (MODWU (MOVBZreg x) (MOVBZreg y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMODWU) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) @@ -4131,6 +4420,9 @@ func rewriteValueS390X_OpMod8u_0(v *Value) bool { } } func rewriteValueS390X_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -4139,7 +4431,7 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -4151,9 +4443,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVBstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) @@ -4169,9 +4461,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVHstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) @@ -4187,9 +4479,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVWstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) @@ -4205,9 +4497,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVDstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) @@ -4223,9 +4515,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVDstore) v.AuxInt = 8 v.AddArg(dst) @@ -4250,9 +4542,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 24 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVDstore) v.AuxInt = 16 v.AddArg(dst) @@ -4286,9 +4578,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 2 v.AddArg(dst) @@ -4313,9 +4605,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = 4 v.AddArg(dst) @@ -4340,9 +4632,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpS390XMOVHstore) v.AuxInt = 4 v.AddArg(dst) @@ -4364,6 +4656,9 @@ func rewriteValueS390X_OpMove_0(v *Value) bool { return false } func rewriteValueS390X_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [7] dst src mem) @@ -4372,9 +4667,9 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 
v.reset(OpS390XMOVBstore) v.AuxInt = 6 v.AddArg(dst) @@ -4407,9 +4702,9 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { // result: (MVC [makeValAndOff(s, 0)] dst src mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 0 && s <= 256) { break } @@ -4425,9 +4720,9 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { // result: (MVC [makeValAndOff(s-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 256 && s <= 512) { break } @@ -4448,9 +4743,9 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { // result: (MVC [makeValAndOff(s-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 512 && s <= 768) { break } @@ -4476,9 +4771,9 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { // result: (MVC [makeValAndOff(s-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 768 && s <= 1024) { break } @@ -4509,9 +4804,9 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { // result: (LoweredMove [s%256] dst src (ADD src (MOVDconst [(s/256)*256])) mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 1024) { break } @@ -4531,11 +4826,13 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { return false } func rewriteValueS390X_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (MULLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMULLW) v.AddArg(x) v.AddArg(y) @@ -4543,11 +4840,13 @@ func rewriteValueS390X_OpMul16_0(v *Value) bool { } } func rewriteValueS390X_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32 x y) // result: (MULLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMULLW) v.AddArg(x) v.AddArg(y) @@ -4555,11 +4854,13 @@ func rewriteValueS390X_OpMul32_0(v *Value) bool { } } func rewriteValueS390X_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (FMULS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFMULS) v.AddArg(x) v.AddArg(y) @@ -4567,11 +4868,13 @@ func rewriteValueS390X_OpMul32F_0(v *Value) bool { } } func rewriteValueS390X_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64 x y) // result: (MULLD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMULLD) v.AddArg(x) v.AddArg(y) @@ -4579,11 +4882,13 @@ func rewriteValueS390X_OpMul64_0(v *Value) bool { } } func rewriteValueS390X_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (FMUL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFMUL) v.AddArg(x) v.AddArg(y) @@ -4591,11 +4896,13 @@ func rewriteValueS390X_OpMul64F_0(v *Value) bool { } } func rewriteValueS390X_OpMul64uhilo_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64uhilo x y) // result: (MLGR x y) for { - y := v.Args[1] - x 
:= v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMLGR) v.AddArg(x) v.AddArg(y) @@ -4603,11 +4910,13 @@ func rewriteValueS390X_OpMul64uhilo_0(v *Value) bool { } } func rewriteValueS390X_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (MULLW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XMULLW) v.AddArg(x) v.AddArg(y) @@ -4615,73 +4924,81 @@ func rewriteValueS390X_OpMul8_0(v *Value) bool { } } func rewriteValueS390X_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg16 x) // result: (NEGW x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNEGW) v.AddArg(x) return true } } func rewriteValueS390X_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32 x) // result: (NEGW x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNEGW) v.AddArg(x) return true } } func rewriteValueS390X_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (FNEGS x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFNEGS) v.AddArg(x) return true } } func rewriteValueS390X_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64 x) // result: (NEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNEG) v.AddArg(x) return true } } func rewriteValueS390X_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (FNEG x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFNEG) v.AddArg(x) return true } } func rewriteValueS390X_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg8 x) // result: (NEGW x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XNEGW) v.AddArg(x) return true } } func rewriteValueS390X_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4702,13 +5019,15 @@ func rewriteValueS390X_OpNeq16_0(v *Value) bool { } } func rewriteValueS390X_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32 x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4725,13 +5044,15 @@ func rewriteValueS390X_OpNeq32_0(v *Value) bool { } } func rewriteValueS390X_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32F x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4748,13 +5069,15 @@ func rewriteValueS390X_OpNeq32F_0(v *Value) bool { } } func rewriteValueS390X_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq64 x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4771,13 +5094,15 @@ func rewriteValueS390X_OpNeq64_0(v *Value) bool { } } func 
rewriteValueS390X_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq64F x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4794,13 +5119,15 @@ func rewriteValueS390X_OpNeq64F_0(v *Value) bool { } } func rewriteValueS390X_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4821,13 +5148,15 @@ func rewriteValueS390X_OpNeq8_0(v *Value) bool { } } func rewriteValueS390X_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NeqB x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4848,13 +5177,15 @@ func rewriteValueS390X_OpNeqB_0(v *Value) bool { } } func rewriteValueS390X_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NeqPtr x y) // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Aux = s390x.NotEqual v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) @@ -4871,11 +5202,13 @@ func rewriteValueS390X_OpNeqPtr_0(v *Value) bool { } } func rewriteValueS390X_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpS390XLoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -4883,10 +5216,11 @@ func rewriteValueS390X_OpNilCheck_0(v *Value) bool { } } func rewriteValueS390X_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (XORWconst [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XXORWconst) v.AuxInt = 1 v.AddArg(x) @@ -4894,13 +5228,14 @@ func rewriteValueS390X_OpNot_0(v *Value) bool { } } func rewriteValueS390X_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OffPtr [off] ptr:(SP)) // result: (MOVDaddr [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if ptr.Op != OpSP { break } @@ -4914,7 +5249,7 @@ func rewriteValueS390X_OpOffPtr_0(v *Value) bool { // result: (ADDconst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 if !(is32Bit(off)) { break } @@ -4927,7 +5262,7 @@ func rewriteValueS390X_OpOffPtr_0(v *Value) bool { // result: (ADD (MOVDconst [off]) ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpS390XADD) v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = off @@ -4937,11 +5272,13 @@ func rewriteValueS390X_OpOffPtr_0(v *Value) bool { } } func rewriteValueS390X_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (ORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XORW) 
v.AddArg(x) v.AddArg(y) @@ -4949,11 +5286,13 @@ func rewriteValueS390X_OpOr16_0(v *Value) bool { } } func rewriteValueS390X_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (ORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XORW) v.AddArg(x) v.AddArg(y) @@ -4961,11 +5300,13 @@ func rewriteValueS390X_OpOr32_0(v *Value) bool { } } func rewriteValueS390X_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or64 x y) // result: (OR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XOR) v.AddArg(x) v.AddArg(y) @@ -4973,11 +5314,13 @@ func rewriteValueS390X_OpOr64_0(v *Value) bool { } } func rewriteValueS390X_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (ORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XORW) v.AddArg(x) v.AddArg(y) @@ -4985,11 +5328,13 @@ func rewriteValueS390X_OpOr8_0(v *Value) bool { } } func rewriteValueS390X_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (ORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XORW) v.AddArg(x) v.AddArg(y) @@ -4997,14 +5342,17 @@ func rewriteValueS390X_OpOrB_0(v *Value) bool { } } func rewriteValueS390X_OpPanicBounds_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (PanicBounds [kind] x y mem) // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 0) { break } @@ -5020,9 +5368,9 @@ func rewriteValueS390X_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsB [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 1) { break } @@ -5038,9 +5386,9 @@ func rewriteValueS390X_OpPanicBounds_0(v *Value) bool { // result: (LoweredPanicBoundsC [kind] x y mem) for { kind := v.AuxInt - mem := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + mem := v_2 if !(boundsABI(kind) == 2) { break } @@ -5054,12 +5402,13 @@ func rewriteValueS390X_OpPanicBounds_0(v *Value) bool { return false } func rewriteValueS390X_OpPopCount16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount16 x) // result: (MOVBZreg (SumBytes2 (POPCNT x))) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v0 := b.NewValue0(v.Pos, OpS390XSumBytes2, typ.UInt8) v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt16) @@ -5070,12 +5419,13 @@ func rewriteValueS390X_OpPopCount16_0(v *Value) bool { } } func rewriteValueS390X_OpPopCount32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount32 x) // result: (MOVBZreg (SumBytes4 (POPCNT x))) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v0 := b.NewValue0(v.Pos, OpS390XSumBytes4, typ.UInt8) v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt32) @@ -5086,12 +5436,13 @@ func rewriteValueS390X_OpPopCount32_0(v *Value) bool { } } func rewriteValueS390X_OpPopCount64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount64 x) // result: (MOVBZreg (SumBytes8 (POPCNT x))) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v0 := b.NewValue0(v.Pos, OpS390XSumBytes8, typ.UInt8) v1 := b.NewValue0(v.Pos, 
OpS390XPOPCNT, typ.UInt64) @@ -5102,12 +5453,13 @@ func rewriteValueS390X_OpPopCount64_0(v *Value) bool { } } func rewriteValueS390X_OpPopCount8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount8 x) // result: (POPCNT (MOVBZreg x)) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XPOPCNT) v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) v0.AddArg(x) @@ -5116,15 +5468,15 @@ func rewriteValueS390X_OpPopCount8_0(v *Value) bool { } } func rewriteValueS390X_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (MOVDconst [c])) // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -5147,11 +5499,13 @@ func rewriteValueS390X_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueS390X_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft32 x y) // result: (RLL x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XRLL) v.AddArg(x) v.AddArg(y) @@ -5159,11 +5513,13 @@ func rewriteValueS390X_OpRotateLeft32_0(v *Value) bool { } } func rewriteValueS390X_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft64 x y) // result: (RLLG x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XRLLG) v.AddArg(x) v.AddArg(y) @@ -5171,15 +5527,15 @@ func rewriteValueS390X_OpRotateLeft64_0(v *Value) bool { } } func rewriteValueS390X_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (MOVDconst [c])) // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -5202,10 +5558,11 @@ func rewriteValueS390X_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueS390X_OpRound_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round x) // result: (FIDBR [1] x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFIDBR) v.AuxInt = 1 v.AddArg(x) @@ -5213,30 +5570,33 @@ func rewriteValueS390X_OpRound_0(v *Value) bool { } } func rewriteValueS390X_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: (LoweredRound32F x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XLoweredRound32F) v.AddArg(x) return true } } func rewriteValueS390X_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: (LoweredRound64F x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XLoweredRound64F) v.AddArg(x) return true } } func rewriteValueS390X_OpRoundToEven_0(v *Value) bool { + v_0 := v.Args[0] // match: (RoundToEven x) // result: (FIDBR [4] x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFIDBR) v.AuxInt = 4 v.AddArg(x) @@ -5244,14 +5604,16 @@ func rewriteValueS390X_OpRoundToEven_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5266,8 +5628,8 @@ func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW 
(MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5290,14 +5652,16 @@ func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5312,8 +5676,8 @@ func rewriteValueS390X_OpRsh16Ux32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5334,14 +5698,16 @@ func rewriteValueS390X_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5356,8 +5722,8 @@ func rewriteValueS390X_OpRsh16Ux64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5378,14 +5744,16 @@ func rewriteValueS390X_OpRsh16Ux64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5400,8 +5768,8 @@ func rewriteValueS390X_OpRsh16Ux8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5424,14 +5792,16 @@ func rewriteValueS390X_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5445,8 +5815,8 @@ func rewriteValueS390X_OpRsh16x16_0(v *Value) bool { // match: (Rsh16x16 x y) // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) @@ -5468,14 +5838,16 @@ func rewriteValueS390X_OpRsh16x16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5489,8 +5861,8 @@ func rewriteValueS390X_OpRsh16x32_0(v *Value) bool { // 
match: (Rsh16x32 x y) // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) @@ -5510,14 +5882,16 @@ func rewriteValueS390X_OpRsh16x32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5531,8 +5905,8 @@ func rewriteValueS390X_OpRsh16x64_0(v *Value) bool { // match: (Rsh16x64 x y) // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) @@ -5552,14 +5926,16 @@ func rewriteValueS390X_OpRsh16x64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5573,8 +5949,8 @@ func rewriteValueS390X_OpRsh16x8_0(v *Value) bool { // match: (Rsh16x8 x y) // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) v0.AddArg(x) @@ -5596,14 +5972,16 @@ func rewriteValueS390X_OpRsh16x8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5616,8 +5994,8 @@ func rewriteValueS390X_OpRsh32Ux16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5638,14 +6016,16 @@ func rewriteValueS390X_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5658,8 +6038,8 @@ func rewriteValueS390X_OpRsh32Ux32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5678,14 +6058,16 @@ func rewriteValueS390X_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5698,8 +6080,8 @@ func 
rewriteValueS390X_OpRsh32Ux64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5718,14 +6100,16 @@ func rewriteValueS390X_OpRsh32Ux64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // cond: shiftIsBounded(v) // result: (SRW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5738,8 +6122,8 @@ func rewriteValueS390X_OpRsh32Ux8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5760,14 +6144,16 @@ func rewriteValueS390X_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5779,8 +6165,8 @@ func rewriteValueS390X_OpRsh32x16_0(v *Value) bool { // match: (Rsh32x16 x y) // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -5800,13 +6186,15 @@ func rewriteValueS390X_OpRsh32x16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x32 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5818,8 +6206,8 @@ func rewriteValueS390X_OpRsh32x32_0(v *Value) bool { // match: (Rsh32x32 x y) // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -5837,13 +6225,15 @@ func rewriteValueS390X_OpRsh32x32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x64 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5855,8 +6245,8 @@ func rewriteValueS390X_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x y) // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -5874,14 +6264,16 @@ func rewriteValueS390X_OpRsh32x64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5893,8 +6285,8 @@ func rewriteValueS390X_OpRsh32x8_0(v *Value) bool { // match: (Rsh32x8 x y) // result: 
(SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -5914,14 +6306,16 @@ func rewriteValueS390X_OpRsh32x8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5934,8 +6328,8 @@ func rewriteValueS390X_OpRsh64Ux16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5956,14 +6350,16 @@ func rewriteValueS390X_OpRsh64Ux16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -5976,8 +6372,8 @@ func rewriteValueS390X_OpRsh64Ux32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -5996,14 +6392,16 @@ func rewriteValueS390X_OpRsh64Ux32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux64 x y) // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6016,8 +6414,8 @@ func rewriteValueS390X_OpRsh64Ux64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -6036,14 +6434,16 @@ func rewriteValueS390X_OpRsh64Ux64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) // cond: shiftIsBounded(v) // result: (SRD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6056,8 +6456,8 @@ func rewriteValueS390X_OpRsh64Ux8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -6078,14 +6478,16 @@ func rewriteValueS390X_OpRsh64Ux8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6097,8 +6499,8 @@ func rewriteValueS390X_OpRsh64x16_0(v *Value) bool { // match: (Rsh64x16 x y) // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst 
(MOVHZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -6118,13 +6520,15 @@ func rewriteValueS390X_OpRsh64x16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x32 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6136,8 +6540,8 @@ func rewriteValueS390X_OpRsh64x32_0(v *Value) bool { // match: (Rsh64x32 x y) // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -6155,13 +6559,15 @@ func rewriteValueS390X_OpRsh64x32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6173,8 +6579,8 @@ func rewriteValueS390X_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x y) // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -6192,14 +6598,16 @@ func rewriteValueS390X_OpRsh64x64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6211,8 +6619,8 @@ func rewriteValueS390X_OpRsh64x8_0(v *Value) bool { // match: (Rsh64x8 x y) // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) @@ -6232,14 +6640,16 @@ func rewriteValueS390X_OpRsh64x8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6254,8 +6664,8 @@ func rewriteValueS390X_OpRsh8Ux16_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -6278,14 +6688,16 @@ func rewriteValueS390X_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6300,8 +6712,8 @@ func rewriteValueS390X_OpRsh8Ux32_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [64])) for { t := 
v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -6322,14 +6734,16 @@ func rewriteValueS390X_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6344,8 +6758,8 @@ func rewriteValueS390X_OpRsh8Ux64_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -6366,14 +6780,16 @@ func rewriteValueS390X_OpRsh8Ux64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6388,8 +6804,8 @@ func rewriteValueS390X_OpRsh8Ux8_0(v *Value) bool { // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) for { t := v.Type - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XLOCGR) v.Type = t v.Aux = s390x.GreaterOrEqual @@ -6412,14 +6828,16 @@ func rewriteValueS390X_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6433,8 +6851,8 @@ func rewriteValueS390X_OpRsh8x16_0(v *Value) bool { // match: (Rsh8x16 x y) // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) @@ -6456,14 +6874,16 @@ func rewriteValueS390X_OpRsh8x16_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6477,8 +6897,8 @@ func rewriteValueS390X_OpRsh8x32_0(v *Value) bool { // match: (Rsh8x32 x y) // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) @@ -6498,14 +6918,16 @@ func rewriteValueS390X_OpRsh8x32_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6519,8 +6941,8 @@ func rewriteValueS390X_OpRsh8x64_0(v *Value) bool { // match: (Rsh8x64 x y) // result: (SRAW (MOVBreg x) (LOCGR 
{s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) @@ -6540,14 +6962,16 @@ func rewriteValueS390X_OpRsh8x64_0(v *Value) bool { } } func rewriteValueS390X_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -6561,8 +6985,8 @@ func rewriteValueS390X_OpRsh8x8_0(v *Value) bool { // match: (Rsh8x8 x y) // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSRAW) v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) v0.AddArg(x) @@ -6584,14 +7008,14 @@ func rewriteValueS390X_OpRsh8x8_0(v *Value) bool { } } func rewriteValueS390X_OpS390XADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) // result: (ADDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -6610,15 +7034,12 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { // cond: d == 64-c // result: (RLLGconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLDconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XSRDconst { continue } @@ -6637,10 +7058,8 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { // cond: ptr.Op != OpSB && idx.Op != OpSB // result: (MOVDaddridx [c] {s} ptr idx) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - idx := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + idx := v_0 if v_1.Op != OpS390XMOVDaddr { continue } @@ -6662,10 +7081,8 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { // match: (ADD x (NEG y)) // result: (SUB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XNEG { continue } @@ -6682,10 +7099,9 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVDload { continue } @@ -6710,14 +7126,14 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XADDC_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDC x (MOVDconst [c])) // cond: is16Bit(c) // result: (ADDCconst x [c]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -6735,13 +7151,14 @@ func rewriteValueS390X_OpS390XADDC_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XADDE_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] // match: (ADDE x y (FlagEQ)) // result: (ADDC x y) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpS390XFlagEQ { break } @@ -6753,10 +7170,8 @@ func rewriteValueS390X_OpS390XADDE_0(v *Value) bool { // match: (ADDE x y (FlagLT)) // result: (ADDC x y) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpS390XFlagLT { break } @@ -6768,10 +7183,8 @@ func rewriteValueS390X_OpS390XADDE_0(v *Value) bool { // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c))))) // result: (ADDE x y c) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpSelect1 { break } @@ -6805,13 +7218,13 @@ func rewriteValueS390X_OpS390XADDE_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDW x (MOVDconst [c])) // result: (ADDWconst [int64(int32(c))] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -6827,15 +7240,12 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { // cond: d == 32-c // result: (RLLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLWconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XSRWconst { continue } @@ -6853,10 +7263,8 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { // match: (ADDW x (NEGW y)) // result: (SUBW x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XNEGW { continue } @@ -6873,10 +7281,9 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWload { continue } @@ -6903,10 +7310,9 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWZload { continue } @@ -6931,12 +7337,13 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XADDWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDWconst [c] x) // cond: int32(c)==0 // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -6949,7 +7356,6 @@ func rewriteValueS390X_OpS390XADDWconst_0(v *Value) bool { // result: (MOVDconst [int64(int32(c+d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -6962,7 +7368,6 @@ func rewriteValueS390X_OpS390XADDWconst_0(v *Value) bool { // result: (ADDWconst [int64(int32(c+d))] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XADDWconst { break } @@ -6976,20 +7381,22 @@ func rewriteValueS390X_OpS390XADDWconst_0(v *Value) bool { return false } func 
rewriteValueS390X_OpS390XADDWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) // cond: ptr.Op != OpSB && is20Bit(off1+off2) // result: (ADDWload [off1+off2] {sym} x ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -7007,15 +7414,14 @@ func rewriteValueS390X_OpS390XADDWload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -7030,12 +7436,12 @@ func rewriteValueS390X_OpS390XADDWload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) // cond: ((c+d)&1 == 0) && is32Bit(c+d) // result: (MOVDaddr [c+d] {s} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -7056,7 +7462,6 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { // result: (MOVDaddr [c+d] {s} x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -7077,7 +7482,6 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { // result: (MOVDaddridx [c+d] {s} x y) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -7101,7 +7505,7 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -7111,7 +7515,6 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { // result: (MOVDconst [c+d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -7125,7 +7528,6 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { // result: (ADDconst [c+d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } @@ -7142,6 +7544,9 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XADDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ADDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) // cond: isSamePtr(ptr1, ptr2) @@ -7150,10 +7555,8 @@ func rewriteValueS390X_OpS390XADDload_0(v *Value) bool { t := v.Type off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr1 := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr1 := v_1 if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -7176,14 +7579,13 @@ func rewriteValueS390X_OpS390XADDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -7201,15 +7603,14 @@ func rewriteValueS390X_OpS390XADDload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -7224,16 +7625,16 @@ func 
rewriteValueS390X_OpS390XADDload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XAND_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (AND x (MOVDconst [c])) // cond: is32Bit(c) && c < 0 // result: (ANDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -7252,10 +7653,8 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // cond: is32Bit(c) && c >= 0 // result: (MOVWZreg (ANDWconst [int64(int32(c))] x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -7275,10 +7674,8 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND x (MOVDconst [0xFF])) // result: (MOVBZreg x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFF { continue } @@ -7291,10 +7688,8 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND x (MOVDconst [0xFFFF])) // result: (MOVHZreg x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFFFF { continue } @@ -7307,10 +7702,8 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND x (MOVDconst [0xFFFFFFFF])) // result: (MOVWZreg x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst || v_1.AuxInt != 0xFFFFFFFF { continue } @@ -7323,14 +7716,8 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND (MOVDconst [^(-1<<63)]) (LGDR x)) // result: (LGDR (LPDFR x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != ^(-1<<63) { - continue - } - v_1 := v.Args[1^_i0] - if v_1.Op != OpS390XLGDR { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != ^(-1<<63) || v_1.Op != OpS390XLGDR { continue } t := v_1.Type @@ -7347,14 +7734,11 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c&d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XMOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XMOVDconst { continue } @@ -7368,8 +7752,8 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -7382,10 +7766,9 @@ func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVDload { continue } @@ -7410,13 +7793,13 @@ func 
rewriteValueS390X_OpS390XAND_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ANDW x (MOVDconst [c])) // result: (ANDWconst [int64(int32(c))] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -7431,8 +7814,8 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { // match: (ANDW x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -7445,10 +7828,9 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWload { continue } @@ -7475,10 +7857,9 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWZload { continue } @@ -7503,11 +7884,11 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ANDWconst [c] (ANDWconst [d] x)) // result: (ANDWconst [c & d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -7524,7 +7905,7 @@ func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { if v.AuxInt != 0xFF { break } - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v.AddArg(x) return true @@ -7535,7 +7916,7 @@ func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { if v.AuxInt != 0xFFFF { break } - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVHZreg) v.AddArg(x) return true @@ -7557,7 +7938,7 @@ func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == -1) { break } @@ -7570,7 +7951,6 @@ func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { // result: (MOVDconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -7582,20 +7962,22 @@ func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XANDWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) // cond: ptr.Op != OpSB && is20Bit(off1+off2) // result: (ANDWload [off1+off2] {sym} x ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -7613,15 +7995,14 @@ func rewriteValueS390X_OpS390XANDWload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -7636,11 +8017,11 @@ func rewriteValueS390X_OpS390XANDWload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XANDconst_0(v *Value) bool { + v_0 
:= v.Args[0] // match: (ANDconst [c] (ANDconst [d] x)) // result: (ANDconst [c & d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XANDconst { break } @@ -7667,7 +8048,7 @@ func rewriteValueS390X_OpS390XANDconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -7677,7 +8058,6 @@ func rewriteValueS390X_OpS390XANDconst_0(v *Value) bool { // result: (MOVDconst [c&d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -7689,6 +8069,9 @@ func rewriteValueS390X_OpS390XANDconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ANDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) // cond: isSamePtr(ptr1, ptr2) @@ -7697,10 +8080,8 @@ func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { t := v.Type off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr1 := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr1 := v_1 if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -7723,14 +8104,13 @@ func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -7748,15 +8128,14 @@ func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -7771,14 +8150,14 @@ func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMP_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMP x (MOVDconst [c])) // cond: is32Bit(c) // result: (CMPconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -7795,12 +8174,11 @@ func rewriteValueS390X_OpS390XCMP_0(v *Value) bool { // cond: is32Bit(c) // result: (InvertFlags (CMPconst x [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } c := v_0.AuxInt + x := v_1 if !(is32Bit(c)) { break } @@ -7814,14 +8192,14 @@ func rewriteValueS390X_OpS390XCMP_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPU x (MOVDconst [c])) // cond: isU32Bit(c) // result: (CMPUconst x [int64(int32(c))]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -7838,12 +8216,11 @@ func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool { // cond: isU32Bit(c) // result: (InvertFlags (CMPUconst x [int64(int32(c))])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } c := v_0.AuxInt + x := v_1 if !(isU32Bit(c)) { break } @@ -7857,12 +8234,12 @@ func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPUconst (MOVDconst [x]) [y]) // cond: uint64(x)==uint64(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -7878,7 
+8255,6 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -7894,7 +8270,6 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -7910,7 +8285,6 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (FlagLT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XSRDconst { break } @@ -7925,7 +8299,6 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } @@ -7939,7 +8312,7 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHreg { break } @@ -7952,7 +8325,7 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZreg { break } @@ -7965,7 +8338,7 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBreg { break } @@ -7978,7 +8351,7 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZreg { break } @@ -7992,7 +8365,6 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } @@ -8012,12 +8384,12 @@ func rewriteValueS390X_OpS390XCMPUconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPUconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (CMPUconst (MOVWreg x:(ANDWconst [m] _)) [c]) // cond: int32(m) >= 0 // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } @@ -8037,13 +8409,13 @@ func rewriteValueS390X_OpS390XCMPUconst_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVDconst [c])) // result: (CMPWconst x [int64(int32(c))]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -8056,12 +8428,11 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { // match: (CMPW (MOVDconst [c]) x) // result: (InvertFlags (CMPWconst x [int64(int32(c))])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags) v0.AuxInt = int64(int32(c)) @@ -8072,9 +8443,7 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { // match: (CMPW x (MOVWreg y)) // result: (CMPW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -8087,9 +8456,7 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { // match: (CMPW x (MOVWZreg y)) // result: (CMPW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -8102,12 +8469,11 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { // match: (CMPW (MOVWreg x) y) // result: (CMPW x y) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } x := v_0.Args[0] + y := v_1 
v.reset(OpS390XCMPW) v.AddArg(x) v.AddArg(y) @@ -8116,12 +8482,11 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { // match: (CMPW (MOVWZreg x) y) // result: (CMPW x y) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } x := v_0.Args[0] + y := v_1 v.reset(OpS390XCMPW) v.AddArg(x) v.AddArg(y) @@ -8130,13 +8495,13 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (CMPWU x (MOVDconst [c])) // result: (CMPWUconst x [int64(int32(c))]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -8149,12 +8514,11 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { // match: (CMPWU (MOVDconst [c]) x) // result: (InvertFlags (CMPWUconst x [int64(int32(c))])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) v0.AuxInt = int64(int32(c)) @@ -8165,9 +8529,7 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { // match: (CMPWU x (MOVWreg y)) // result: (CMPWU x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -8180,9 +8542,7 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { // match: (CMPWU x (MOVWZreg y)) // result: (CMPWU x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -8195,12 +8555,11 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { // match: (CMPWU (MOVWreg x) y) // result: (CMPWU x y) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } x := v_0.Args[0] + y := v_1 v.reset(OpS390XCMPWU) v.AddArg(x) v.AddArg(y) @@ -8209,12 +8568,11 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { // match: (CMPWU (MOVWZreg x) y) // result: (CMPWU x y) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } x := v_0.Args[0] + y := v_1 v.reset(OpS390XCMPWU) v.AddArg(x) v.AddArg(y) @@ -8223,12 +8581,12 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPWUconst (MOVDconst [x]) [y]) // cond: uint32(x)==uint32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8244,7 +8602,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8260,7 +8617,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8276,7 +8632,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (FlagLT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVBZreg || !(0xff < c) { break } @@ -8288,7 +8643,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (FlagLT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) { break } @@ -8300,7 +8654,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (FlagLT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XSRWconst { break } @@ -8316,7 +8669,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: 
(FlagLT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -8331,7 +8683,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } @@ -8345,7 +8696,6 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } @@ -8358,12 +8708,12 @@ func rewriteValueS390X_OpS390XCMPWUconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPWconst (MOVDconst [x]) [y]) // cond: int32(x)==int32(y) // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8379,7 +8729,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8395,7 +8744,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8411,7 +8759,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (FlagLT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVBZreg || !(0xff < c) { break } @@ -8423,7 +8770,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (FlagLT) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) { break } @@ -8435,7 +8781,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (FlagGT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XSRWconst { break } @@ -8451,7 +8796,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (FlagLT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -8467,7 +8811,7 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (CMPWUconst x [n]) for { n := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XSRWconst { break } @@ -8484,7 +8828,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } @@ -8498,7 +8841,6 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } @@ -8511,12 +8853,12 @@ func rewriteValueS390X_OpS390XCMPWconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (CMPconst (MOVDconst [x]) [y]) // cond: x==y // result: (FlagEQ) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8532,7 +8874,6 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (FlagLT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8548,7 +8889,6 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (FlagGT) for { y := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -8564,7 +8904,6 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (FlagGT) for { n := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XSRDconst { break } @@ -8579,7 +8918,6 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } @@ -8593,7 
+8931,7 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHreg { break } @@ -8606,7 +8944,7 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZreg { break } @@ -8619,7 +8957,7 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBreg { break } @@ -8632,7 +8970,7 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (CMPWconst x [c]) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZreg { break } @@ -8646,7 +8984,6 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWZreg { break } @@ -8666,12 +9003,12 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XCMPconst_10(v *Value) bool { + v_0 := v.Args[0] // match: (CMPconst (MOVWreg x:(ANDWconst [m] _)) [c]) // cond: int32(m) >= 0 && c >= 0 // result: (CMPWUconst x [c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVWreg { break } @@ -8693,7 +9030,7 @@ func rewriteValueS390X_OpS390XCMPconst_10(v *Value) bool { // result: (CMPUconst x [n]) for { n := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpS390XSRDconst { break } @@ -8709,13 +9046,13 @@ func rewriteValueS390X_OpS390XCMPconst_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XCPSDR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (CPSDR y (FMOVDconst [c])) // cond: c & -1<<63 == 0 // result: (LPDFR y) for { - _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] + y := v_0 if v_1.Op != OpS390XFMOVDconst { break } @@ -8731,9 +9068,7 @@ func rewriteValueS390X_OpS390XCPSDR_0(v *Value) bool { // cond: c & -1<<63 != 0 // result: (LNDFR y) for { - _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] + y := v_0 if v_1.Op != OpS390XFMOVDconst { break } @@ -8748,18 +9083,18 @@ func rewriteValueS390X_OpS390XCPSDR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFADD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FADD (FMUL y z) x) // result: (FMADD x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XFMUL { continue } z := v_0.Args[1] y := v_0.Args[0] - x := v.Args[1^_i0] + x := v_1 v.reset(OpS390XFMADD) v.AddArg(x) v.AddArg(y) @@ -8771,18 +9106,18 @@ func rewriteValueS390X_OpS390XFADD_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFADDS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FADDS (FMULS y z) x) // result: (FMADDS x y z) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XFMULS { continue } z := v_0.Args[1] y := v_0.Args[0] - x := v.Args[1^_i0] + x := v_1 v.reset(OpS390XFMADDS) v.AddArg(x) v.AddArg(y) @@ -8794,15 +9129,15 @@ func rewriteValueS390X_OpS390XFADDS_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (LDGR x) for { off := v.AuxInt sym := v.Aux - _ = 
v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -8822,9 +9157,7 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XFMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -8845,13 +9178,12 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -8868,14 +9200,13 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -8892,8 +9223,6 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -8901,6 +9230,7 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -8918,15 +9248,16 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -8943,20 +9274,22 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { break } @@ -8974,14 +9307,13 @@ func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XADDconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { break } @@ -8996,20 +9328,22 @@ func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is20Bit(off1+off2) // result: (FMOVDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is20Bit(off1 + off2)) { break } @@ -9027,15 +9361,14 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != 
OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9053,8 +9386,6 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -9062,7 +9393,8 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9081,16 +9413,17 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -9108,21 +9441,24 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) // cond: is20Bit(c+d) // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { break } @@ -9141,15 +9477,14 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XADDconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { break } @@ -9165,15 +9500,15 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: x for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XFMOVSstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -9194,13 +9529,12 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -9217,14 +9551,13 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9241,8 +9574,6 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -9250,6 +9581,7 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := 
v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9267,15 +9599,16 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -9292,20 +9625,22 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { break } @@ -9323,14 +9658,13 @@ func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XADDconst { break } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { break } @@ -9345,20 +9679,22 @@ func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is20Bit(off1+off2) // result: (FMOVSstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is20Bit(off1 + off2)) { break } @@ -9376,15 +9712,14 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9402,8 +9737,6 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -9411,7 +9744,8 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -9430,16 +9764,17 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -9457,21 +9792,24 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) // cond: is20Bit(c+d) // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { break } @@ -9490,15 +9828,14 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XADDconst { break } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { break } @@ -9514,10 +9851,10 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFNEG_0(v *Value) bool { + v_0 := v.Args[0] // match: (FNEG (LPDFR x)) // result: (LNDFR x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XLPDFR { break } @@ -9529,7 +9866,6 @@ func rewriteValueS390X_OpS390XFNEG_0(v *Value) bool { // match: (FNEG (LNDFR x)) // result: (LPDFR x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XLNDFR { break } @@ -9541,10 +9877,10 @@ func rewriteValueS390X_OpS390XFNEG_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFNEGS_0(v *Value) bool { + v_0 := v.Args[0] // match: (FNEGS (LPDFR x)) // result: (LNDFR x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XLPDFR { break } @@ -9556,7 +9892,6 @@ func rewriteValueS390X_OpS390XFNEGS_0(v *Value) bool { // match: (FNEGS (LNDFR x)) // result: (LPDFR x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XLNDFR { break } @@ -9568,16 +9903,17 @@ func rewriteValueS390X_OpS390XFNEGS_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFSUB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FSUB (FMUL y z) x) // result: (FMSUB x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XFMUL { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpS390XFMSUB) v.AddArg(x) v.AddArg(y) @@ -9587,16 +9923,17 @@ func rewriteValueS390X_OpS390XFSUB_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFSUBS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (FSUBS (FMULS y z) x) // result: (FMSUBS x y z) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XFMULS { break } z := v_0.Args[1] y := v_0.Args[0] + x := v_1 v.reset(OpS390XFMSUBS) v.AddArg(x) v.AddArg(y) @@ -9606,12 +9943,12 @@ func rewriteValueS390X_OpS390XFSUBS_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (LDGR (SRDconst [1] (SLDconst [1] x))) // result: (LPDFR (LDGR x)) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpS390XSRDconst || v_0.AuxInt != 1 { break } @@ -9630,17 +9967,17 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { // result: (LPDFR (LDGR x)) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpS390XAND { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != ^(-1<<63) { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpS390XLPDFR) v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) v0.AddArg(x) @@ -9653,17 +9990,17 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { // result: (LNDFR (LDGR x)) for 
{ t := v.Type - v_0 := v.Args[0] if v_0.Op != OpS390XOR { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpS390XLNDFR) v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) v0.AddArg(x) @@ -9677,7 +10014,7 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { // result: @x.Block (LNDFR (LDGR (MOVDload [off] {sym} ptr mem))) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XORload { break } @@ -9710,7 +10047,6 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { // match: (LDGR (LGDR x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpS390XLGDR { break } @@ -9723,10 +10059,10 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XLEDBR_0(v *Value) bool { + v_0 := v.Args[0] // match: (LEDBR (LPDFR (LDEBR x))) // result: (LPDFR x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XLPDFR { break } @@ -9742,7 +10078,6 @@ func rewriteValueS390X_OpS390XLEDBR_0(v *Value) bool { // match: (LEDBR (LNDFR (LDEBR x))) // result: (LNDFR x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XLNDFR { break } @@ -9758,10 +10093,10 @@ func rewriteValueS390X_OpS390XLEDBR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XLGDR_0(v *Value) bool { + v_0 := v.Args[0] // match: (LGDR (LDGR x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpS390XLDGR { break } @@ -9774,14 +10109,15 @@ func rewriteValueS390X_OpS390XLGDR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (LOCGR {c} x y (InvertFlags cmp)) // result: (LOCGR {c.(s390x.CCMask).ReverseComparison()} x y cmp) for { c := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpS390XInvertFlags { break } @@ -9798,9 +10134,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[1] - v_2 := v.Args[2] + x := v_1 if v_2.Op != OpS390XFlagEQ || !(c.(s390x.CCMask)&s390x.Equal != 0) { break } @@ -9814,9 +10148,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[1] - v_2 := v.Args[2] + x := v_1 if v_2.Op != OpS390XFlagLT || !(c.(s390x.CCMask)&s390x.Less != 0) { break } @@ -9830,9 +10162,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[1] - v_2 := v.Args[2] + x := v_1 if v_2.Op != OpS390XFlagGT || !(c.(s390x.CCMask)&s390x.Greater != 0) { break } @@ -9846,9 +10176,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[1] - v_2 := v.Args[2] + x := v_1 if v_2.Op != OpS390XFlagOV || !(c.(s390x.CCMask)&s390x.Unordered != 0) { break } @@ -9862,9 +10190,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpS390XFlagEQ || !(c.(s390x.CCMask)&s390x.Equal == 0) { break } @@ -9878,9 +10204,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpS390XFlagLT || !(c.(s390x.CCMask)&s390x.Less == 0) { break } @@ 
-9894,9 +10218,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpS390XFlagGT || !(c.(s390x.CCMask)&s390x.Greater == 0) { break } @@ -9910,9 +10232,7 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { // result: x for { c := v.Aux - _ = v.Args[2] - x := v.Args[0] - v_2 := v.Args[2] + x := v_0 if v_2.Op != OpS390XFlagOV || !(c.(s390x.CCMask)&s390x.Unordered == 0) { break } @@ -9924,10 +10244,11 @@ func rewriteValueS390X_OpS390XLOCGR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XLoweredRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (LoweredRound32F x:(FMOVSconst)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XFMOVSconst { break } @@ -9939,10 +10260,11 @@ func rewriteValueS390X_OpS390XLoweredRound32F_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XLoweredRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (LoweredRound64F x:(FMOVDconst)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XFMOVDconst { break } @@ -9954,15 +10276,15 @@ func rewriteValueS390X_OpS390XLoweredRound64F_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (MOVBZreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVBstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -9982,13 +10304,12 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -10005,14 +10326,13 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10029,8 +10349,6 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -10038,6 +10356,7 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10055,15 +10374,16 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -10080,21 +10400,23 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for 
_i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -10114,15 +10436,14 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -10139,12 +10460,13 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBZreg e:(MOVBreg x)) // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBreg { break } @@ -10160,7 +10482,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHreg { break } @@ -10176,7 +10498,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWreg { break } @@ -10192,7 +10514,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBZreg { break } @@ -10208,7 +10530,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHZreg { break } @@ -10224,7 +10546,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWZreg { break } @@ -10240,7 +10562,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZload { break } @@ -10257,7 +10579,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZloadidx { break } @@ -10275,7 +10597,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // result: @x.Block (MOVBZload [o] {s} p mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBload { break } @@ -10301,7 +10623,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { // result: @x.Block (MOVBZloadidx [o] {s} p i mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBloadidx { break } @@ -10327,13 +10649,14 @@ func rewriteValueS390X_OpS390XMOVBZreg_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVBZreg x:(Arg )) // cond: !t.IsSigned() && t.Size() == 1 // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -10349,7 +10672,6 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { // match: (MOVBZreg (MOVDconst [c])) // result: (MOVDconst [int64( uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -10362,7 +10684,7 @@ func 
rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { // cond: int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XLOCGR { break } @@ -10388,7 +10710,6 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { // match: (MOVBZreg (ANDWconst [m] x)) // result: (MOVWZreg (ANDWconst [int64( uint8(m))] x)) for { - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -10404,15 +10725,15 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (MOVBreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVBstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -10432,13 +10753,12 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -10455,14 +10775,13 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10479,8 +10798,6 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -10488,6 +10805,7 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10505,15 +10823,16 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -10530,21 +10849,23 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVBloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -10564,15 +10885,14 @@ func rewriteValueS390X_OpS390XMOVBloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c 
+ d)) { continue } @@ -10589,12 +10909,13 @@ func rewriteValueS390X_OpS390XMOVBloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVBreg e:(MOVBreg x)) // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBreg { break } @@ -10610,7 +10931,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHreg { break } @@ -10626,7 +10947,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWreg { break } @@ -10642,7 +10963,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBZreg { break } @@ -10658,7 +10979,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHZreg { break } @@ -10674,7 +10995,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWZreg { break } @@ -10690,7 +11011,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBload { break } @@ -10707,7 +11028,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBloadidx { break } @@ -10725,7 +11046,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // result: @x.Block (MOVBload [o] {s} p mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZload { break } @@ -10751,7 +11072,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { // result: @x.Block (MOVBloadidx [o] {s} p i mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZloadidx { break } @@ -10777,13 +11098,14 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBreg_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVBreg x:(Arg )) // cond: t.IsSigned() && t.Size() == 1 // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -10799,7 +11121,6 @@ func rewriteValueS390X_OpS390XMOVBreg_10(v *Value) bool { // match: (MOVBreg (MOVDconst [c])) // result: (MOVDconst [int64( int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -10812,7 +11133,6 @@ func rewriteValueS390X_OpS390XMOVBreg_10(v *Value) bool { // cond: int8(m) >= 0 // result: (MOVWZreg (ANDWconst [int64( uint8(m))] x)) for { - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -10831,18 +11151,20 @@ func rewriteValueS390X_OpS390XMOVBreg_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVBreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpS390XMOVBstore) 
v.AuxInt = off v.Aux = sym @@ -10856,13 +11178,12 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVBZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpS390XMOVBstore) v.AuxInt = off v.Aux = sym @@ -10877,14 +11198,13 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is20Bit(off1 + off2)) { break } @@ -10902,13 +11222,12 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is20Bit(off) && ptr.Op != OpSB) { break } @@ -10925,15 +11244,14 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10951,8 +11269,6 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -10960,7 +11276,8 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -10979,16 +11296,17 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -11009,10 +11327,9 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11038,15 +11355,14 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w0 := v.Args[1] + p := v_0 + w0 := v_1 if w0.Op != OpS390XSRDconst { break } j := w0.AuxInt w := w0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11072,10 +11388,9 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11098,21 +11413,23 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem)) // cond: p.Op != OpSB && 
x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} p w0 mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w0 := v.Args[1] + p := v_0 + w0 := v_1 if w0.Op != OpS390XSRWconst { break } j := w0.AuxInt w := w0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11138,14 +11455,12 @@ func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11167,15 +11482,13 @@ func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11201,14 +11514,12 @@ func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRWconst || v_1.AuxInt != 8 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11230,15 +11541,13 @@ func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRWconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVBstore || x.AuxInt != i-1 || x.Aux != s { break } @@ -11261,19 +11570,20 @@ func rewriteValueS390X_OpS390XMOVBstore_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) // cond: is20Bit(ValAndOff(sc).Off()+off) // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(ValAndOff(sc).Off() + off)) { break } @@ -11290,14 +11600,13 @@ func rewriteValueS390X_OpS390XMOVBstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -11314,9 +11623,8 @@ func rewriteValueS390X_OpS390XMOVBstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpS390XMOVBstoreconst { break } @@ -11338,22 +11646,25 @@ func rewriteValueS390X_OpS390XMOVBstoreconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) // cond: is20Bit(c+d) // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - 
idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -11374,16 +11685,15 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -11404,18 +11714,19 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -11440,23 +11751,24 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w0 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w0 := v_2 if w0.Op != OpS390XSRDconst { continue } j := w0.AuxInt w := w0.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -11481,18 +11793,19 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -11517,23 +11830,24 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w0 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w0 := v_2 if w0.Op != OpS390XSRWconst { continue } j := w0.AuxInt w := w0.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -11558,22 +11872,22 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := 
v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHBRstoreidx) @@ -11594,23 +11908,23 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRDconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -11635,22 +11949,22 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 8 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVHBRstoreidx) @@ -11671,23 +11985,23 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRWconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVBstoreidx || x.AuxInt != i-1 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -11709,19 +12023,20 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDaddridx_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) // cond: is20Bit(c+d) && x.Op != OpSB // result: (MOVDaddridx [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } d 
:= v_0.AuxInt x := v_0.Args[0] + y := v_1 if !(is20Bit(c+d) && x.Op != OpSB) { break } @@ -11738,9 +12053,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } @@ -11762,14 +12075,13 @@ func rewriteValueS390X_OpS390XMOVDaddridx_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off2 := v_0.AuxInt sym2 := v_0.Aux x := v_0.Args[0] + y := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } @@ -11786,9 +12098,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } @@ -11808,15 +12118,15 @@ func rewriteValueS390X_OpS390XMOVDaddridx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: x for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -11837,9 +12147,7 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XFMOVDstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -11859,13 +12167,12 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -11882,8 +12189,6 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -11891,6 +12196,7 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { break } @@ -11907,8 +12213,6 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -11916,6 +12220,7 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -11933,15 +12238,16 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -11958,21 +12264,23 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDloadidx [c] {sym} 
(ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -11992,15 +12300,14 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -12017,20 +12324,22 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is20Bit(off1+off2) // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is20Bit(off1 + off2)) { break } @@ -12048,13 +12357,12 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB) { break } @@ -12071,8 +12379,6 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -12080,7 +12386,8 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { break } @@ -12098,8 +12405,6 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -12107,7 +12412,8 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -12126,16 +12432,17 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -12156,10 +12463,9 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w1 := v.Args[1] - x := v.Args[2] + p := v_0 + w1 := v_1 + x := v_2 if x.Op != OpS390XMOVDstore || x.AuxInt != i-8 || x.Aux != s { break } @@ -12186,10 
+12492,9 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w2 := v.Args[1] - x := v.Args[2] + p := v_0 + w2 := v_1 + x := v_2 if x.Op != OpS390XSTMG2 || x.AuxInt != i-16 || x.Aux != s { break } @@ -12218,10 +12523,9 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w3 := v.Args[1] - x := v.Args[2] + p := v_0 + w3 := v_1 + x := v_2 if x.Op != OpS390XSTMG3 || x.AuxInt != i-24 || x.Aux != s { break } @@ -12249,19 +12553,20 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) // cond: isU12Bit(ValAndOff(sc).Off()+off) // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU12Bit(ValAndOff(sc).Off() + off)) { break } @@ -12278,14 +12583,13 @@ func rewriteValueS390X_OpS390XMOVDstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -12299,22 +12603,25 @@ func rewriteValueS390X_OpS390XMOVDstoreconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) // cond: is20Bit(c+d) // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -12335,16 +12642,15 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -12362,20 +12668,21 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHBRstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstore [i-2] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -12397,15 +12704,13 @@ func rewriteValueS390X_OpS390XMOVHBRstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst { 
break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -12431,14 +12736,12 @@ func rewriteValueS390X_OpS390XMOVHBRstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRWconst || v_1.AuxInt != 16 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -12460,15 +12763,13 @@ func rewriteValueS390X_OpS390XMOVHBRstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRWconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVHBRstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -12491,28 +12792,32 @@ func rewriteValueS390X_OpS390XMOVHBRstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 16 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWBRstoreidx) @@ -12533,23 +12838,23 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRDconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -12574,22 +12879,22 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRWconst || v_2.AuxInt != 16 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, 
x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVWBRstoreidx) @@ -12610,23 +12915,23 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRWconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVHBRstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -12648,15 +12953,15 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (MOVHZreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVHstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -12676,13 +12981,12 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -12699,8 +13003,6 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -12708,6 +13010,7 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { break } @@ -12724,8 +13027,6 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -12733,6 +13034,7 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -12750,15 +13052,16 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -12775,21 +13078,23 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -12809,15 +13114,14 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -12834,12 +13138,13 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVHZreg e:(MOVBZreg x)) // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBZreg { break } @@ -12855,7 +13160,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHreg { break } @@ -12871,7 +13176,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWreg { break } @@ -12887,7 +13192,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHZreg { break } @@ -12903,7 +13208,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWZreg { break } @@ -12919,7 +13224,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZload { break } @@ -12936,7 +13241,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZloadidx { break } @@ -12953,7 +13258,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZload { break } @@ -12970,7 +13275,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZloadidx { break } @@ -12988,7 +13293,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { // result: @x.Block (MOVHZload [o] {s} p mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHload { break } @@ -13012,6 +13317,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHZreg_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVHZreg x:(MOVHloadidx [o] {s} p i mem)) @@ -13019,7 +13325,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_10(v *Value) bool { // result: @x.Block (MOVHZloadidx [o] {s} p i mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHloadidx { break } @@ -13046,7 +13352,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_10(v *Value) bool { // cond: !t.IsSigned() && t.Size() <= 2 // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ 
-13062,7 +13368,6 @@ func rewriteValueS390X_OpS390XMOVHZreg_10(v *Value) bool { // match: (MOVHZreg (MOVDconst [c])) // result: (MOVDconst [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -13074,7 +13379,6 @@ func rewriteValueS390X_OpS390XMOVHZreg_10(v *Value) bool { // match: (MOVHZreg (ANDWconst [m] x)) // result: (MOVWZreg (ANDWconst [int64(uint16(m))] x)) for { - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -13090,15 +13394,15 @@ func rewriteValueS390X_OpS390XMOVHZreg_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (MOVHreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVHstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -13118,13 +13422,12 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -13141,8 +13444,6 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -13150,6 +13451,7 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { break } @@ -13166,8 +13468,6 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -13175,6 +13475,7 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13192,15 +13493,16 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -13217,21 +13519,23 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVHloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -13251,15 +13555,14 @@ func rewriteValueS390X_OpS390XMOVHloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -13276,11 +13579,12 @@ func rewriteValueS390X_OpS390XMOVHloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVHreg e:(MOVBreg x)) // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBreg { break } @@ -13296,7 +13600,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHreg { break } @@ -13312,7 +13616,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWreg { break } @@ -13328,7 +13632,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHZreg { break } @@ -13344,7 +13648,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWZreg { break } @@ -13360,7 +13664,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBload { break } @@ -13377,7 +13681,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBloadidx { break } @@ -13394,7 +13698,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHload { break } @@ -13411,7 +13715,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHloadidx { break } @@ -13428,7 +13732,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZload { break } @@ -13444,13 +13748,14 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVHreg x:(MOVBZloadidx _ _ _)) // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZloadidx { break } @@ -13468,7 +13773,7 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { // result: @x.Block (MOVHload [o] {s} p mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZload { break } @@ -13494,7 +13799,7 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { // result: @x.Block (MOVHloadidx [o] {s} p i mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZloadidx { break } @@ -13521,7 +13826,7 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { // cond: t.IsSigned() && t.Size() <= 2 // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -13537,7 +13842,6 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { // match: (MOVHreg (MOVDconst [c])) // result: 
(MOVDconst [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -13550,7 +13854,6 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { // cond: int16(m) >= 0 // result: (MOVWZreg (ANDWconst [int64(uint16(m))] x)) for { - v_0 := v.Args[0] if v_0.Op != OpS390XANDWconst { break } @@ -13569,18 +13872,20 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) // result: (MOVHstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVHreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpS390XMOVHstore) v.AuxInt = off v.Aux = sym @@ -13594,13 +13899,12 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVHZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpS390XMOVHstore) v.AuxInt = off v.Aux = sym @@ -13615,14 +13919,13 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is20Bit(off1 + off2)) { break } @@ -13640,13 +13943,12 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(isU12Bit(off) && ptr.Op != OpSB) { break } @@ -13663,8 +13965,6 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -13672,7 +13972,8 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { break } @@ -13690,8 +13991,6 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -13699,7 +13998,8 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -13718,16 +14018,17 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -13748,10 +14049,9 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || 
x.Aux != s { break } @@ -13777,15 +14077,14 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w0 := v.Args[1] + p := v_0 + w0 := v_1 if w0.Op != OpS390XSRDconst { break } j := w0.AuxInt w := w0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -13811,10 +14110,9 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] + p := v_0 + w := v_1 + x := v_2 if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -13837,21 +14135,23 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) // result: (MOVWstore [i-2] {s} p w0 mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w0 := v.Args[1] + p := v_0 + w0 := v_1 if w0.Op != OpS390XSRWconst { break } j := w0.AuxInt w := w0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVHstore || x.AuxInt != i-2 || x.Aux != s { break } @@ -13874,6 +14174,8 @@ func rewriteValueS390X_OpS390XMOVHstore_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) @@ -13882,13 +14184,12 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU12Bit(ValAndOff(sc).Off() + off)) { break } @@ -13905,14 +14206,13 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -13929,9 +14229,8 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpS390XMOVHstoreconst { break } @@ -13956,22 +14255,25 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) // cond: is20Bit(c+d) // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -13992,16 +14294,15 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, 
v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -14022,18 +14323,19 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -14058,23 +14360,24 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w0 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w0 := v_2 if w0.Op != OpS390XSRDconst { continue } j := w0.AuxInt w := w0.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -14099,18 +14402,19 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -14135,23 +14439,24 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w0 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w0 := v_2 if w0.Op != OpS390XSRWconst { continue } j := w0.AuxInt w := w0.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVHstoreidx || x.AuxInt != i-2 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -14173,20 +14478,21 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWBRstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVDBRstore [i-4] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != 
OpS390XSRDconst || v_1.AuxInt != 32 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVWBRstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -14208,15 +14514,13 @@ func rewriteValueS390X_OpS390XMOVWBRstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst { break } j := v_1.AuxInt w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVWBRstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -14239,28 +14543,32 @@ func rewriteValueS390X_OpS390XMOVWBRstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWBRstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRDconst || v_2.AuxInt != 32 { continue } w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { continue } v.reset(OpS390XMOVDBRstoreidx) @@ -14281,23 +14589,23 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - v_2 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 if v_2.Op != OpS390XSRDconst { continue } j := v_2.AuxInt w := v_2.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVWBRstoreidx || x.AuxInt != i-4 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } w0 := x.Args[2] @@ -14319,15 +14627,15 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (MOVWZreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -14347,13 +14655,12 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -14370,8 +14677,6 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -14379,6 +14684,7 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool 
{ off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { break } @@ -14395,8 +14701,6 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -14404,6 +14708,7 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -14421,15 +14726,16 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -14446,21 +14752,23 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -14480,15 +14788,14 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -14505,11 +14812,12 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWZreg e:(MOVBZreg x)) // cond: clobberIfDead(e) // result: (MOVBZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBZreg { break } @@ -14525,7 +14833,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHZreg { break } @@ -14541,7 +14849,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVWZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWreg { break } @@ -14557,7 +14865,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVWZreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWZreg { break } @@ -14573,7 +14881,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZload { break } @@ -14590,7 +14898,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if 
x.Op != OpS390XMOVBZloadidx { break } @@ -14607,7 +14915,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZload { break } @@ -14624,7 +14932,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZloadidx { break } @@ -14641,7 +14949,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 4) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWZload { break } @@ -14658,7 +14966,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 4) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWZloadidx { break } @@ -14674,13 +14982,14 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVWZreg x:(MOVWload [o] {s} p mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWZload [o] {s} p mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWload { break } @@ -14706,7 +15015,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { // result: @x.Block (MOVWZloadidx [o] {s} p i mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWloadidx { break } @@ -14733,7 +15042,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { // cond: !t.IsSigned() && t.Size() <= 4 // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -14749,7 +15058,6 @@ func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { // match: (MOVWZreg (MOVDconst [c])) // result: (MOVDconst [int64(uint32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -14761,15 +15069,15 @@ func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) // cond: isSamePtr(ptr1, ptr2) // result: (MOVWreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] - ptr1 := v.Args[0] - v_1 := v.Args[1] + ptr1 := v_0 if v_1.Op != OpS390XMOVWstore || v_1.AuxInt != off || v_1.Aux != sym { break } @@ -14789,13 +15097,12 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(is20Bit(off1 + off2)) { break } @@ -14812,8 +15119,6 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -14821,6 +15126,7 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { break } @@ -14837,8 +15143,6 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -14846,6 +15150,7 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { sym2 := v_0.Aux idx := 
v_0.Args[1] ptr := v_0.Args[0] + mem := v_1 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -14863,15 +15168,16 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + mem := v_1 if !(ptr.Op != OpSB) { continue } @@ -14888,21 +15194,23 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWloadidx_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) // cond: is20Bit(c+d) // result: (MOVWloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] + idx := v_1 + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -14922,15 +15230,14 @@ func rewriteValueS390X_OpS390XMOVWloadidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[2] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] + mem := v_2 if !(is20Bit(c + d)) { continue } @@ -14947,11 +15254,12 @@ func rewriteValueS390X_OpS390XMOVWloadidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { + v_0 := v.Args[0] // match: (MOVWreg e:(MOVBreg x)) // cond: clobberIfDead(e) // result: (MOVBreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVBreg { break } @@ -14967,7 +15275,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVHreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVHreg { break } @@ -14983,7 +15291,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVWreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWreg { break } @@ -14999,7 +15307,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: clobberIfDead(e) // result: (MOVWreg x) for { - e := v.Args[0] + e := v_0 if e.Op != OpS390XMOVWZreg { break } @@ -15015,7 +15323,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBload { break } @@ -15032,7 +15340,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBloadidx { break } @@ -15049,7 +15357,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHload { break } @@ -15066,7 +15374,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHloadidx { break } @@ -15083,7 +15391,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) 
bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWload { break } @@ -15100,7 +15408,7 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { // cond: (x.Type.IsSigned() || x.Type.Size() == 8) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWloadidx { break } @@ -15116,12 +15424,13 @@ func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MOVWreg x:(MOVBZload _ _)) // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZload { break } @@ -15138,7 +15447,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVBZloadidx { break } @@ -15155,7 +15464,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZload { break } @@ -15172,7 +15481,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVHZloadidx { break } @@ -15190,7 +15499,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // result: @x.Block (MOVWload [o] {s} p mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWZload { break } @@ -15216,7 +15525,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // result: @x.Block (MOVWloadidx [o] {s} p i mem) for { t := v.Type - x := v.Args[0] + x := v_0 if x.Op != OpS390XMOVWZloadidx { break } @@ -15243,7 +15552,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // cond: t.IsSigned() && t.Size() <= 4 // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpArg { break } @@ -15259,7 +15568,6 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { // match: (MOVWreg (MOVDconst [c])) // result: (MOVDconst [int64(int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -15271,18 +15579,20 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) // result: (MOVWstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVWreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpS390XMOVWstore) v.AuxInt = off v.Aux = sym @@ -15296,13 +15606,12 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVWZreg { break } x := v_1.Args[0] + mem := v_2 v.reset(OpS390XMOVWstore) v.AuxInt = off v.Aux = sym @@ -15317,14 +15626,13 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is20Bit(off1 + off2)) { break } @@ -15342,13 +15650,12 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - ptr := 
v.Args[0] - v_1 := v.Args[1] + ptr := v_0 if v_1.Op != OpS390XMOVDconst { break } c := v_1.AuxInt + mem := v_2 if !(is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB) { break } @@ -15365,8 +15672,6 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } @@ -15374,7 +15679,8 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { break } @@ -15392,8 +15698,6 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { off1 := v.AuxInt sym1 := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddridx { break } @@ -15401,7 +15705,8 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { sym2 := v_0.Aux idx := v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } @@ -15420,16 +15725,17 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { off := v.AuxInt sym := v.Aux - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpS390XADD { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v_0.Args[_i0] - idx := v_0.Args[1^_i0] - val := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + ptr := v_0_0 + idx := v_0_1 + val := v_1 + mem := v_2 if !(ptr.Op != OpSB) { continue } @@ -15450,14 +15756,12 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 32 { break } w := v_1.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVWstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -15479,15 +15783,14 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w0 := v.Args[1] + p := v_0 + w0 := v_1 if w0.Op != OpS390XSRDconst { break } j := w0.AuxInt w := w0.Args[0] - x := v.Args[2] + x := v_2 if x.Op != OpS390XMOVWstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -15513,10 +15816,9 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w1 := v.Args[1] - x := v.Args[2] + p := v_0 + w1 := v_1 + x := v_2 if x.Op != OpS390XMOVWstore || x.AuxInt != i-4 || x.Aux != s { break } @@ -15540,16 +15842,18 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) // result: (STM3 [i-8] {s} p w0 w1 w2 mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w2 := v.Args[1] - x := v.Args[2] + p := v_0 + w2 := v_1 + x := v_2 if x.Op != OpS390XSTM2 || x.AuxInt != i-8 || x.Aux != s { break } @@ -15578,10 +15882,9 @@ func rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w3 := v.Args[1] - x := v.Args[2] + p := v_0 + w3 := v_1 + x := v_2 if x.Op != OpS390XSTM3 || x.AuxInt != i-12 || x.Aux != s { break } @@ -15609,6 +15912,8 @@ func 
rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) @@ -15617,13 +15922,12 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt s := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } off := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU12Bit(ValAndOff(sc).Off() + off)) { break } @@ -15640,14 +15944,13 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { for { sc := v.AuxInt sym1 := v.Aux - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDaddr { break } off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] + mem := v_1 if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } @@ -15664,9 +15967,8 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] + p := v_0 + x := v_1 if x.Op != OpS390XMOVWstoreconst { break } @@ -15691,22 +15993,25 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) // cond: is20Bit(c+d) // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XADDconst { continue } d := v_0.AuxInt ptr := v_0.Args[0] - idx := v.Args[1^_i0] - val := v.Args[2] + idx := v_1 + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -15727,16 +16032,15 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { for { c := v.AuxInt sym := v.Aux - mem := v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - ptr := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + ptr := v_0 if v_1.Op != OpS390XADDconst { continue } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] + val := v_2 + mem := v_3 if !(is20Bit(c + d)) { continue } @@ -15757,18 +16061,19 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w := v.Args[2] - x := v.Args[3] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w := v_2 + x := v_3 if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { continue } mem := x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -15793,23 +16098,24 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - for _i0 := 0; _i0 <= 1; _i0++ { - p := v.Args[_i0] - idx := v.Args[1^_i0] - w0 := v.Args[2] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + p := v_0 + idx := v_1 + w0 := v_2 if w0.Op != OpS390XSRDconst { continue } j := w0.AuxInt w := w0.Args[0] - x := v.Args[3] + x := v_3 if x.Op != OpS390XMOVWstoreidx || x.AuxInt != i-4 || x.Aux != s { continue } mem := 
x.Args[3] - for _i1 := 0; _i1 <= 1; _i1++ { - if p != x.Args[_i1] || idx != x.Args[1^_i1] { + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 { + if p != x_0 || idx != x_1 { continue } x_2 := x.Args[2] @@ -15831,14 +16137,14 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULLD x (MOVDconst [c])) // cond: is32Bit(c) // result: (MULLDconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -15858,10 +16164,9 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVDload { continue } @@ -15886,6 +16191,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLDconst [-1] x) // result: (NEG x) @@ -15893,7 +16199,7 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpS390XNEG) v.AddArg(x) return true @@ -15914,7 +16220,7 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { if v.AuxInt != 1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -15925,7 +16231,7 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { // result: (SLDconst [log2(c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c)) { break } @@ -15939,7 +16245,7 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { // result: (SUB (SLDconst [log2(c+1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c+1) && c >= 15) { break } @@ -15956,7 +16262,7 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { // result: (ADD (SLDconst [log2(c-1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-1) && c >= 17) { break } @@ -15972,7 +16278,6 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { // result: (MOVDconst [c*d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -15984,6 +16289,9 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (MULLDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) // cond: isSamePtr(ptr1, ptr2) @@ -15992,10 +16300,8 @@ func rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { t := v.Type off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr1 := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr1 := v_1 if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -16018,14 +16324,13 @@ func rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -16043,15 +16348,14 @@ func 
rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -16066,13 +16370,13 @@ func rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULLW x (MOVDconst [c])) // result: (MULLWconst [int64(int32(c))] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -16089,10 +16393,9 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWload { continue } @@ -16119,10 +16422,9 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWZload { continue } @@ -16147,6 +16449,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (MULLWconst [-1] x) // result: (NEGW x) @@ -16154,7 +16457,7 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { if v.AuxInt != -1 { break } - x := v.Args[0] + x := v_0 v.reset(OpS390XNEGW) v.AddArg(x) return true @@ -16175,7 +16478,7 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { if v.AuxInt != 1 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -16186,7 +16489,7 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { // result: (SLWconst [log2(c)] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c)) { break } @@ -16200,7 +16503,7 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { // result: (SUBW (SLWconst [log2(c+1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c+1) && c >= 15) { break } @@ -16217,7 +16520,7 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { // result: (ADDW (SLWconst [log2(c-1)] x) x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(isPowerOfTwo(c-1) && c >= 17) { break } @@ -16233,7 +16536,6 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { // result: (MOVDconst [int64(int32(c*d))]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -16245,20 +16547,22 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMULLWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) // cond: ptr.Op != OpSB && is20Bit(off1+off2) // result: (MULLWload [off1+off2] {sym} x ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if 
!(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -16276,15 +16580,14 @@ func rewriteValueS390X_OpS390XMULLWload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -16299,10 +16602,10 @@ func rewriteValueS390X_OpS390XMULLWload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XNEG_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEG (MOVDconst [c])) // result: (MOVDconst [-c]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -16315,7 +16618,6 @@ func rewriteValueS390X_OpS390XNEG_0(v *Value) bool { // cond: c != -(1<<31) // result: (ADDconst [-c] x) for { - v_0 := v.Args[0] if v_0.Op != OpS390XADDconst { break } @@ -16336,10 +16638,10 @@ func rewriteValueS390X_OpS390XNEG_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XNEGW_0(v *Value) bool { + v_0 := v.Args[0] // match: (NEGW (MOVDconst [c])) // result: (MOVDconst [int64(int32(-c))]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -16351,13 +16653,14 @@ func rewriteValueS390X_OpS390XNEGW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XNOT_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NOT x) // cond: true // result: (XOR (MOVDconst [-1]) x) for { - x := v.Args[0] + x := v_0 if !(true) { break } @@ -16371,11 +16674,12 @@ func rewriteValueS390X_OpS390XNOT_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XNOTW_0(v *Value) bool { + v_0 := v.Args[0] // match: (NOTW x) // cond: true // result: (XORWconst [-1] x) for { - x := v.Args[0] + x := v_0 if !(true) { break } @@ -16387,15 +16691,15 @@ func rewriteValueS390X_OpS390XNOTW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XOR_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (OR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (ORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -16414,15 +16718,12 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // cond: d == 64-c // result: (RLLGconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLDconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XSRDconst { continue } @@ -16440,14 +16741,8 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // match: (OR (MOVDconst [-1<<63]) (LGDR x)) // result: (LGDR (LNDFR x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] - if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != -1<<63 { - continue - } - v_1 := v.Args[1^_i0] - if v_1.Op != OpS390XLGDR { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XMOVDconst || v_0.AuxInt != -1<<63 || v_1.Op != OpS390XLGDR { continue } t := v_1.Type @@ -16464,9 +16759,7 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // match: (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR y))) // result: (LGDR (CPSDR y x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if 
v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { continue } @@ -16479,7 +16772,6 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { continue } x := v_0_0_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XLGDR { continue } @@ -16502,9 +16794,7 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // cond: c & -1<<63 == 0 // result: (LGDR (CPSDR (FMOVDconst [c]) x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 63 { continue } @@ -16517,7 +16807,6 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { continue } x := v_0_0_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XMOVDconst { continue } @@ -16539,24 +16828,18 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // match: (OR (AND (MOVDconst [-1<<63]) (LGDR x)) (LGDR (LPDFR y))) // result: (LGDR (CPSDR y x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XAND { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_0_0 := v_0.Args[_i1] - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { - continue - } - v_0_1 := v_0.Args[1^_i1] - if v_0_1.Op != OpS390XLGDR { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 || v_0_1.Op != OpS390XLGDR { continue } x := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XLGDR { continue } @@ -16580,24 +16863,18 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // cond: c & -1<<63 == 0 // result: (LGDR (CPSDR (FMOVDconst [c]) x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XAND { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_0_0 := v_0.Args[_i1] - if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 { - continue - } - v_0_1 := v_0.Args[1^_i1] - if v_0_1.Op != OpS390XLGDR { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + if v_0_0.Op != OpS390XMOVDconst || v_0_0.AuxInt != -1<<63 || v_0_1.Op != OpS390XLGDR { continue } x := v_0_1.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XMOVDconst { continue } @@ -16620,14 +16897,11 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // match: (OR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c|d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XMOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XMOVDconst { continue } @@ -16641,8 +16915,8 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // match: (OR x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -16655,10 +16929,9 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { // result: (ORload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVDload { continue } @@ -16683,15 +16956,16 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XOR_10(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OR x1:(MOVBZload [i1] {s} p mem) sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem))) // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVBZload { continue } @@ -16699,7 +16973,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { continue } @@ -16731,9 +17005,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVHZload { continue } @@ -16741,7 +17014,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { continue } @@ -16773,9 +17046,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVWZload { continue } @@ -16783,7 +17055,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { continue } @@ -16815,9 +17087,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpS390XSLDconst { continue } @@ -16830,13 +17101,15 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpS390XSLDconst { continue } @@ -16853,7 +17126,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -16880,9 +17153,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpS390XSLDconst { continue } @@ -16895,13 +17167,15 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpS390XSLDconst { continue } @@ -16918,7 +17192,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -16945,19 +17219,20 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVBZloadidx { continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { continue } @@ -16970,8 +17245,10 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -16993,19 +17270,20 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVHZloadidx { 
continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { continue } @@ -17018,8 +17296,10 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -17041,19 +17321,20 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVWZloadidx { continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { continue } @@ -17066,8 +17347,10 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -17089,9 +17372,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpS390XSLDconst { continue } @@ -17103,16 +17385,20 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - or := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = 
_i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s1 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpS390XSLDconst { continue } @@ -17126,11 +17412,13 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -17160,9 +17448,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpS390XSLDconst { continue } @@ -17174,16 +17461,20 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - or := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s1 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpS390XSLDconst { continue } @@ -17197,11 +17488,13 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -17230,15 +17523,16 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XOR_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (OR x0:(MOVBZload [i0] {s} p mem) sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem))) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, 
v_1, v_0 { + x0 := v_0 if x0.Op != OpS390XMOVBZload { continue } @@ -17246,7 +17540,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { continue } @@ -17280,9 +17574,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r0 := v_0 if r0.Op != OpS390XMOVHZreg { continue } @@ -17294,7 +17587,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { continue } @@ -17332,9 +17625,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r0 := v_0 if r0.Op != OpS390XMOVWZreg { continue } @@ -17346,7 +17638,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { continue } @@ -17382,9 +17674,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpS390XSLDconst { continue } @@ -17397,13 +17688,15 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpS390XSLDconst { continue } @@ -17420,7 +17713,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { if p != x0.Args[0] || mem != x0.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -17449,9 +17742,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && 
clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpS390XSLDconst { continue } @@ -17468,13 +17760,15 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpS390XSLDconst { continue } @@ -17495,7 +17789,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { if p != x0.Args[0] || mem != x0.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -17524,19 +17818,20 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpS390XMOVBZloadidx { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 8 { continue } @@ -17549,8 +17844,10 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -17574,9 +17871,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r0 := v_0 if r0.Op != OpS390XMOVHZreg { continue } @@ -17587,10 +17883,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; 
_i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 16 { continue } @@ -17607,8 +17905,10 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -17632,9 +17932,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r0 := v_0 if r0.Op != OpS390XMOVWZreg { continue } @@ -17645,10 +17944,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpS390XSLDconst || sh.AuxInt != 32 { continue } @@ -17665,8 +17966,10 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -17688,9 +17991,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpS390XSLDconst { continue } @@ -17702,16 +18004,20 @@ func 
rewriteValueS390X_OpS390XOR_20(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - or := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s0 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpS390XSLDconst { continue } @@ -17725,11 +18031,13 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { continue } _ = x0.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -17761,9 +18069,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpS390XSLDconst { continue } @@ -17779,16 +18086,20 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - or := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + or := v_1 if or.Op != OpS390XOR { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s0 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpS390XSLDconst { continue } @@ -17806,11 +18117,13 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { continue } _ = x0.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -17841,15 +18154,15 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { return false } func rewriteValueS390X_OpS390XORW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORW x 
(MOVDconst [c])) // result: (ORWconst [int64(int32(c))] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -17865,15 +18178,12 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // cond: d == 32-c // result: (RLLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLWconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XSRWconst { continue } @@ -17891,8 +18201,8 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // match: (ORW x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -17905,10 +18215,9 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWload { continue } @@ -17935,10 +18244,9 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWZload { continue } @@ -17964,9 +18272,8 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVBZload { continue } @@ -17974,7 +18281,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { continue } @@ -18006,9 +18313,8 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVHZload { continue } @@ -18016,7 +18322,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { continue } @@ -18048,9 +18354,8 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != 
OpS390XSLWconst { continue } @@ -18063,13 +18368,15 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpS390XORW { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s1 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpS390XSLWconst { continue } @@ -18086,7 +18393,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { if p != x1.Args[0] || mem != x1.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -18113,19 +18420,20 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVBZloadidx { continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { continue } @@ -18138,8 +18446,10 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -18161,19 +18471,20 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x1 := v_0 if x1.Op != OpS390XMOVHZloadidx { continue } i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - sh := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { continue } @@ -18186,8 +18497,10 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { continue } _ = x0.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x0.Args[_i2] || idx != x0.Args[1^_i2] || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && 
sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -18208,15 +18521,16 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XORW_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s0 := v_0 if s0.Op != OpS390XSLWconst { continue } @@ -18228,16 +18542,20 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - or := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + or := v_1 if or.Op != OpS390XORW { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s1 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s1 := or_0 if s1.Op != OpS390XSLWconst { continue } @@ -18251,11 +18569,13 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { continue } _ = x1.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x1.Args[_i3] || idx != x1.Args[1^_i3] || mem != x1.Args[2] { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -18285,9 +18605,8 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpS390XMOVBZload { continue } @@ -18295,7 +18614,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { continue } @@ -18329,9 +18648,8 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil 
&& clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r0 := v_0 if r0.Op != OpS390XMOVHZreg { continue } @@ -18343,7 +18661,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { s := x0.Aux mem := x0.Args[1] p := x0.Args[0] - sh := v.Args[1^_i0] + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { continue } @@ -18379,9 +18697,8 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpS390XSLWconst { continue } @@ -18394,13 +18711,15 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { s := x1.Aux mem := x1.Args[1] p := x1.Args[0] - or := v.Args[1^_i0] + or := v_1 if or.Op != OpS390XORW { continue } _ = or.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - s0 := or.Args[_i1] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpS390XSLWconst { continue } @@ -18417,7 +18736,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { if p != x0.Args[0] || mem != x0.Args[1] { continue } - y := or.Args[1^_i1] + y := or_1 if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -18446,19 +18765,20 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 if x0.Op != OpS390XMOVBZloadidx { continue } i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 8 { continue } @@ -18471,8 +18791,10 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -18496,9 +18818,8 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - r0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r0 := v_0 if r0.Op != OpS390XMOVHZreg { continue } @@ -18509,10 +18830,12 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { i0 := x0.AuxInt s := x0.Aux mem := x0.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x0.Args[_i1] - idx := x0.Args[1^_i1] - sh := v.Args[1^_i0] + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 { + p := x0_0 + idx := x0_1 + sh := v_1 if sh.Op != OpS390XSLWconst || sh.AuxInt != 16 { continue } @@ -18529,8 +18852,10 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { continue } _ = x1.Args[2] - for _i2 := 0; _i2 <= 1; _i2++ { - if p != x1.Args[_i2] || idx != x1.Args[1^_i2] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 { + if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { continue } b = mergePoint(b, x0, x1) @@ -18552,9 +18877,8 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1,y) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s1 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s1 := v_0 if s1.Op != OpS390XSLWconst { continue } @@ -18566,16 +18890,20 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { i1 := x1.AuxInt s := x1.Aux mem := x1.Args[2] - for _i1 := 0; _i1 <= 1; _i1++ { - p := x1.Args[_i1] - idx := x1.Args[1^_i1] - or := v.Args[1^_i0] + x1_0 := x1.Args[0] + x1_1 := x1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, x1_0, x1_1 = _i1+1, x1_1, x1_0 { + p := x1_0 + idx := x1_1 + or := v_1 if or.Op != OpS390XORW { continue } _ = or.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - s0 := or.Args[_i2] + or_0 := or.Args[0] + or_1 := or.Args[1] + for _i2 := 0; _i2 <= 1; _i2, or_0, or_1 = _i2+1, or_1, or_0 { + s0 := or_0 if s0.Op != OpS390XSLWconst { continue } @@ -18589,11 +18917,13 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { continue } _ = x0.Args[2] - for _i3 := 0; _i3 <= 1; _i3++ { - if p != x0.Args[_i3] || idx != x0.Args[1^_i3] || mem != x0.Args[2] { + x0_0 := x0.Args[0] + x0_1 := x0.Args[1] + for _i3 := 0; _i3 <= 1; _i3, x0_0, x0_1 = _i3+1, x0_1, x0_0 { + if p != x0_0 || idx != x0_1 || mem != x0.Args[2] { continue } - y := or.Args[1^_i2] + y := or_1 if !(p.Op != OpSB && i1 == i0+1 && j1 == 
j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { continue } @@ -18624,12 +18954,13 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XORWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORWconst [c] x) // cond: int32(c)==0 // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -18654,7 +18985,6 @@ func rewriteValueS390X_OpS390XORWconst_0(v *Value) bool { // result: (MOVDconst [c|d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -18666,20 +18996,22 @@ func rewriteValueS390X_OpS390XORWconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XORWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) // cond: ptr.Op != OpSB && is20Bit(off1+off2) // result: (ORWload [off1+off2] {sym} x ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -18697,15 +19029,14 @@ func rewriteValueS390X_OpS390XORWload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -18720,13 +19051,14 @@ func rewriteValueS390X_OpS390XORWload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (ORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -18746,7 +19078,6 @@ func rewriteValueS390X_OpS390XORconst_0(v *Value) bool { // result: (MOVDconst [c|d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -18758,6 +19089,9 @@ func rewriteValueS390X_OpS390XORconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XORload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (ORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) // cond: isSamePtr(ptr1, ptr2) @@ -18766,10 +19100,8 @@ func rewriteValueS390X_OpS390XORload_0(v *Value) bool { t := v.Type off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr1 := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr1 := v_1 if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -18792,14 +19124,13 @@ func rewriteValueS390X_OpS390XORload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -18817,15 +19148,14 @@ func rewriteValueS390X_OpS390XORload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -18840,12 +19170,12 @@ func 
rewriteValueS390X_OpS390XORload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XRLL_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RLL x (MOVDconst [c])) // result: (RLLconst x [c&31]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -18858,12 +19188,12 @@ func rewriteValueS390X_OpS390XRLL_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XRLLG_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RLLG x (MOVDconst [c])) // result: (RLLGconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -18876,14 +19206,14 @@ func rewriteValueS390X_OpS390XRLLG_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SLD x (MOVDconst [c])) // result: (SLDconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -18896,20 +19226,19 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (AND (MOVDconst [c]) y)) // result: (SLD x (ANDWconst [c&63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XAND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpS390XMOVDconst { continue } c := v_1_0.AuxInt - y := v_1.Args[1^_i0] + y := v_1_1 v.reset(OpS390XSLD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) @@ -18924,9 +19253,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // cond: c&63 == 63 // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XANDWconst { break } @@ -18943,9 +19270,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVWreg y)) // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -18958,9 +19283,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVHreg y)) // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHreg { break } @@ -18973,9 +19296,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVBreg y)) // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBreg { break } @@ -18988,9 +19309,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVWZreg y)) // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -19003,9 +19322,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVHZreg y)) // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHZreg { break } @@ -19018,9 +19335,7 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVBZreg y)) // result: (SLD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBZreg { break } @@ -19033,14 +19348,14 @@ func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SLW x (MOVDconst [c])) // result: (SLWconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -19053,20 +19368,19 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (AND (MOVDconst [c]) y)) // result: (SLW x (ANDWconst [c&63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XAND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpS390XMOVDconst { continue } c := v_1_0.AuxInt - y := v_1.Args[1^_i0] + y := v_1_1 v.reset(OpS390XSLW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) @@ -19081,9 +19395,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // cond: c&63 == 63 // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XANDWconst { break } @@ -19100,9 +19412,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (MOVWreg y)) // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -19115,9 +19425,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (MOVHreg y)) // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHreg { break } @@ -19130,9 +19438,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (MOVBreg y)) // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBreg { break } @@ -19145,9 +19451,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (MOVWZreg y)) // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -19160,9 +19464,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (MOVHZreg y)) // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHZreg { break } @@ -19175,9 +19477,7 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { // match: (SLW x (MOVBZreg y)) // result: (SLW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBZreg { break } @@ -19190,14 +19490,14 @@ func rewriteValueS390X_OpS390XSLW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SRAD x (MOVDconst [c])) // result: (SRADconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -19210,20 +19510,19 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (AND (MOVDconst [c]) y)) // result: (SRAD x (ANDWconst [c&63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XAND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpS390XMOVDconst { continue } c := v_1_0.AuxInt - y := v_1.Args[1^_i0] + y := v_1_1 v.reset(OpS390XSRAD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) 
@@ -19238,9 +19537,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // cond: c&63 == 63 // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XANDWconst { break } @@ -19257,9 +19554,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (MOVWreg y)) // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -19272,9 +19567,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (MOVHreg y)) // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHreg { break } @@ -19287,9 +19580,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (MOVBreg y)) // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBreg { break } @@ -19302,9 +19593,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (MOVWZreg y)) // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -19317,9 +19606,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (MOVHZreg y)) // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHZreg { break } @@ -19332,9 +19619,7 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { // match: (SRAD x (MOVBZreg y)) // result: (SRAD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBZreg { break } @@ -19347,11 +19632,11 @@ func rewriteValueS390X_OpS390XSRAD_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRADconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRADconst [c] (MOVDconst [d])) // result: (MOVDconst [d>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -19363,14 +19648,14 @@ func rewriteValueS390X_OpS390XSRADconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SRAW x (MOVDconst [c])) // result: (SRAWconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -19383,20 +19668,19 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (AND (MOVDconst [c]) y)) // result: (SRAW x (ANDWconst [c&63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XAND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpS390XMOVDconst { continue } c := v_1_0.AuxInt - y := v_1.Args[1^_i0] + y := v_1_1 v.reset(OpS390XSRAW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) @@ -19411,9 +19695,7 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // cond: c&63 == 63 // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XANDWconst { break } @@ -19430,9 +19712,7 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (MOVWreg y)) // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -19445,9 +19725,7 @@ func 
rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (MOVHreg y)) // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHreg { break } @@ -19460,9 +19738,7 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (MOVBreg y)) // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBreg { break } @@ -19475,9 +19751,7 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (MOVWZreg y)) // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -19490,9 +19764,7 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (MOVHZreg y)) // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHZreg { break } @@ -19505,9 +19777,7 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { // match: (SRAW x (MOVBZreg y)) // result: (SRAW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBZreg { break } @@ -19520,11 +19790,11 @@ func rewriteValueS390X_OpS390XSRAW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRAWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SRAWconst [c] (MOVDconst [d])) // result: (MOVDconst [int64(int32(d))>>uint64(c)]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -19536,14 +19806,14 @@ func rewriteValueS390X_OpS390XSRAWconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SRD x (MOVDconst [c])) // result: (SRDconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -19556,20 +19826,19 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // match: (SRD x (AND (MOVDconst [c]) y)) // result: (SRD x (ANDWconst [c&63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XAND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpS390XMOVDconst { continue } c := v_1_0.AuxInt - y := v_1.Args[1^_i0] + y := v_1_1 v.reset(OpS390XSRD) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) @@ -19584,9 +19853,7 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // cond: c&63 == 63 // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XANDWconst { break } @@ -19603,9 +19870,7 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // match: (SRD x (MOVWreg y)) // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -19618,9 +19883,7 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // match: (SRD x (MOVHreg y)) // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHreg { break } @@ -19633,9 +19896,7 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // match: (SRD x (MOVBreg y)) // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBreg { break } @@ -19648,9 +19909,7 @@ func rewriteValueS390X_OpS390XSRD_0(v 
*Value) bool { // match: (SRD x (MOVWZreg y)) // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -19663,9 +19922,7 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // match: (SRD x (MOVHZreg y)) // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHZreg { break } @@ -19678,9 +19935,7 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { // match: (SRD x (MOVBZreg y)) // result: (SRD x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBZreg { break } @@ -19693,15 +19948,12 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRDconst_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (SRDconst [1] (SLDconst [1] (LGDR x))) // result: (LGDR (LPDFR x)) for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst || v_0.AuxInt != 1 { + if v.AuxInt != 1 || v_0.Op != OpS390XSLDconst || v_0.AuxInt != 1 { break } v_0_0 := v_0.Args[0] @@ -19720,14 +19972,14 @@ func rewriteValueS390X_OpS390XSRDconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SRW x (MOVDconst [c])) // result: (SRWconst x [c&63]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -19740,20 +19992,19 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (AND (MOVDconst [c]) y)) // result: (SRW x (ANDWconst [c&63] y)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XAND { break } _ = v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_1_0 := v_1.Args[_i0] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { if v_1_0.Op != OpS390XMOVDconst { continue } c := v_1_0.AuxInt - y := v_1.Args[1^_i0] + y := v_1_1 v.reset(OpS390XSRW) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) @@ -19768,9 +20019,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // cond: c&63 == 63 // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XANDWconst { break } @@ -19787,9 +20036,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVWreg y)) // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWreg { break } @@ -19802,9 +20049,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVHreg y)) // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVHreg { break } @@ -19817,9 +20062,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVBreg y)) // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBreg { break } @@ -19832,9 +20075,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVWZreg y)) // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVWZreg { break } @@ -19847,9 +20088,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVHZreg y)) // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != 
OpS390XMOVHZreg { break } @@ -19862,9 +20101,7 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVBZreg y)) // result: (SRW x y) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVBZreg { break } @@ -19877,17 +20114,20 @@ func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSTM2_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem)) // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) // result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - w2 := v.Args[1] - w3 := v.Args[2] - x := v.Args[3] + p := v_0 + w2 := v_1 + w3 := v_2 + x := v_3 if x.Op != OpS390XSTM2 || x.AuxInt != i-8 || x.Aux != s { break } @@ -19916,16 +20156,15 @@ func rewriteValueS390X_OpS390XSTM2_0(v *Value) bool { for { i := v.AuxInt s := v.Aux - mem := v.Args[3] - p := v.Args[0] - v_1 := v.Args[1] + p := v_0 if v_1.Op != OpS390XSRDconst || v_1.AuxInt != 32 { break } x := v_1.Args[0] - if x != v.Args[2] { + if x != v_2 { break } + mem := v_3 v.reset(OpS390XMOVDstore) v.AuxInt = i v.Aux = s @@ -19937,17 +20176,20 @@ func rewriteValueS390X_OpS390XSTM2_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSTMG2_0(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem)) // cond: x.Uses == 1 && is20Bit(i-16) && clobber(x) // result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - p := v.Args[0] - w2 := v.Args[1] - w3 := v.Args[2] - x := v.Args[3] + p := v_0 + w2 := v_1 + w3 := v_2 + x := v_3 if x.Op != OpS390XSTMG2 || x.AuxInt != i-16 || x.Aux != s { break } @@ -19974,14 +20216,14 @@ func rewriteValueS390X_OpS390XSTMG2_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUB x (MOVDconst [c])) // cond: is32Bit(c) // result: (SUBconst x [c]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -19998,12 +20240,11 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { // cond: is32Bit(c) // result: (NEG (SUBconst x [c])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } c := v_0.AuxInt + x := v_1 if !(is32Bit(c)) { break } @@ -20017,8 +20258,8 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { // match: (SUB x x) // result: (MOVDconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpS390XMOVDconst) @@ -20030,9 +20271,8 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { // result: (SUBload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] + x := v_0 + g := v_1 if g.Op != OpS390XMOVDload { break } @@ -20055,13 +20295,14 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSUBE_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBE x y (FlagGT)) // result: (SUBC x y) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpS390XFlagGT { break } @@ -20073,10 +20314,8 @@ func rewriteValueS390X_OpS390XSUBE_0(v *Value) bool { // match: (SUBE x y (FlagOV)) // result: (SUBC x y) for { - _ = v.Args[2] - x := 
v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpS390XFlagOV { break } @@ -20088,10 +20327,8 @@ func rewriteValueS390X_OpS390XSUBE_0(v *Value) bool { // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c)))))) // result: (SUBE x y c) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] + x := v_0 + y := v_1 if v_2.Op != OpSelect1 { break } @@ -20134,13 +20371,13 @@ func rewriteValueS390X_OpS390XSUBE_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBW x (MOVDconst [c])) // result: (SUBWconst x [int64(int32(c))]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDconst { break } @@ -20153,12 +20390,11 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { // match: (SUBW (MOVDconst [c]) x) // result: (NEGW (SUBWconst x [int64(int32(c))])) for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } c := v_0.AuxInt + x := v_1 v.reset(OpS390XNEGW) v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type) v0.AuxInt = int64(int32(c)) @@ -20169,8 +20405,8 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { // match: (SUBW x x) // result: (MOVDconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpS390XMOVDconst) @@ -20182,9 +20418,8 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] + x := v_0 + g := v_1 if g.Op != OpS390XMOVWload { break } @@ -20209,9 +20444,8 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - g := v.Args[1] + x := v_0 + g := v_1 if g.Op != OpS390XMOVWZload { break } @@ -20234,12 +20468,13 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSUBWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBWconst [c] x) // cond: int32(c) == 0 // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -20252,7 +20487,7 @@ func rewriteValueS390X_OpS390XSUBWconst_0(v *Value) bool { // result: (ADDWconst [int64(int32(-c))] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 v.reset(OpS390XADDWconst) v.AuxInt = int64(int32(-c)) v.AddArg(x) @@ -20260,20 +20495,22 @@ func rewriteValueS390X_OpS390XSUBWconst_0(v *Value) bool { } } func rewriteValueS390X_OpS390XSUBWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) // cond: ptr.Op != OpSB && is20Bit(off1+off2) // result: (SUBWload [off1+off2] {sym} x ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -20291,15 +20528,14 @@ func rewriteValueS390X_OpS390XSUBWload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -20314,13 +20550,14 @@ func rewriteValueS390X_OpS390XSUBWload_0(v *Value) bool { return 
false } func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (SUBconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -20331,7 +20568,7 @@ func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { // result: (ADDconst [-c] x) for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(c != -(1 << 31)) { break } @@ -20344,7 +20581,6 @@ func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { // result: (MOVDconst [d-c]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -20358,7 +20594,6 @@ func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { // result: (ADDconst [-c-d] x) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XSUBconst { break } @@ -20375,6 +20610,9 @@ func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (SUBload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) // cond: isSamePtr(ptr1, ptr2) @@ -20383,10 +20621,8 @@ func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { t := v.Type off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr1 := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr1 := v_1 if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -20409,14 +20645,13 @@ func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -20434,15 +20669,14 @@ func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -20457,12 +20691,13 @@ func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XSumBytes2_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SumBytes2 x) // result: (ADDW (SRWconst x [8]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XADDW) v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8) v0.AuxInt = 8 @@ -20473,12 +20708,13 @@ func rewriteValueS390X_OpS390XSumBytes2_0(v *Value) bool { } } func rewriteValueS390X_OpS390XSumBytes4_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SumBytes4 x) // result: (SumBytes2 (ADDW (SRWconst x [16]) x)) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XSumBytes2) v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt16) v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16) @@ -20491,12 +20727,13 @@ func rewriteValueS390X_OpS390XSumBytes4_0(v *Value) bool { } } func rewriteValueS390X_OpS390XSumBytes8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SumBytes8 x) // result: (SumBytes4 (ADDW (SRDconst x [32]) x)) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XSumBytes4) v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt32) v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32) @@ -20509,14 +20746,14 @@ func rewriteValueS390X_OpS390XSumBytes8_0(v *Value) bool { } } func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] // match: (XOR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (XORconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -20535,15 +20772,12 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // cond: d == 64-c // result: (RLLGconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLDconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XSRDconst { continue } @@ -20561,14 +20795,11 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XMOVDconst { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XMOVDconst { continue } @@ -20582,8 +20813,8 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // match: (XOR x x) // result: (MOVDconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpS390XMOVDconst) @@ -20595,10 +20826,9 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // result: (XORload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVDload { continue } @@ -20623,13 +20853,13 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XORW x (MOVDconst [c])) // result: (XORWconst [int64(int32(c))] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpS390XMOVDconst { continue } @@ -20645,15 +20875,12 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { // cond: d == 32-c // result: (RLLconst [c] x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpS390XSLWconst { continue } c := v_0.AuxInt x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpS390XSRWconst { continue } @@ -20671,8 +20898,8 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { // match: (XORW x x) // result: (MOVDconst [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpS390XMOVDconst) @@ -20684,10 +20911,9 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWload { continue } @@ -20714,10 +20940,9 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - g := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 if g.Op != OpS390XMOVWZload { continue } @@ 
-20742,12 +20967,13 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XXORWconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORWconst [c] x) // cond: int32(c)==0 // result: x for { c := v.AuxInt - x := v.Args[0] + x := v_0 if !(int32(c) == 0) { break } @@ -20760,7 +20986,6 @@ func rewriteValueS390X_OpS390XXORWconst_0(v *Value) bool { // result: (MOVDconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -20772,20 +20997,22 @@ func rewriteValueS390X_OpS390XXORWconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XXORWload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) // cond: ptr.Op != OpSB && is20Bit(off1+off2) // result: (XORWload [off1+off2] {sym} x ptr mem) for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -20803,15 +21030,14 @@ func rewriteValueS390X_OpS390XXORWload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -20826,13 +21052,14 @@ func rewriteValueS390X_OpS390XXORWload_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XXORconst_0(v *Value) bool { + v_0 := v.Args[0] // match: (XORconst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -20842,7 +21069,6 @@ func rewriteValueS390X_OpS390XXORconst_0(v *Value) bool { // result: (MOVDconst [c^d]) for { c := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpS390XMOVDconst { break } @@ -20854,6 +21080,9 @@ func rewriteValueS390X_OpS390XXORconst_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (XORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) // cond: isSamePtr(ptr1, ptr2) @@ -20862,10 +21091,8 @@ func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { t := v.Type off := v.AuxInt sym := v.Aux - _ = v.Args[2] - x := v.Args[0] - ptr1 := v.Args[1] - v_2 := v.Args[2] + x := v_0 + ptr1 := v_1 if v_2.Op != OpS390XFMOVDstore || v_2.AuxInt != off || v_2.Aux != sym { break } @@ -20888,14 +21115,13 @@ func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { for { off1 := v.AuxInt sym := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XADDconst { break } off2 := v_1.AuxInt ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(off1+off2)) { break } @@ -20913,15 +21139,14 @@ func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { for { o1 := v.AuxInt s1 := v.Aux - mem := v.Args[2] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpS390XMOVDaddr { break } o2 := v_1.AuxInt s2 := v_1.Aux ptr := v_1.Args[0] + mem := v_2 if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { break } @@ -20936,12 +21161,12 @@ func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { return false } func rewriteValueS390X_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select0 (Add64carry x y c)) // 
result: (Select0 (ADDE x y (Select1 (ADDCconst c [-1])))) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64carry { break } @@ -20965,7 +21190,6 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { // match: (Select0 (Sub64borrow x y c)) // result: (Select0 (SUBE x y (Select1 (SUBC (MOVDconst [0]) c)))) for { - v_0 := v.Args[0] if v_0.Op != OpSub64borrow { break } @@ -20992,7 +21216,6 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { // result: (ADDW val (Select0 tuple)) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpS390XAddTupleFirst32 { break } @@ -21009,7 +21232,6 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { // result: (ADD val (Select0 tuple)) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpS390XAddTupleFirst64 { break } @@ -21025,7 +21247,6 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { // match: (Select0 (ADDCconst (MOVDconst [c]) [d])) // result: (MOVDconst [c+d]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XADDCconst { break } @@ -21042,7 +21263,6 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { // match: (Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) // result: (MOVDconst [c-d]) for { - v_0 := v.Args[0] if v_0.Op != OpS390XSUBC { break } @@ -21064,12 +21284,12 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { return false } func rewriteValueS390X_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select1 (Add64carry x y c)) // result: (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 (ADDE x y (Select1 (ADDCconst c [-1])))))) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64carry { break } @@ -21103,7 +21323,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // match: (Select1 (Sub64borrow x y c)) // result: (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 (SUBE x y (Select1 (SUBC (MOVDconst [0]) c))))))) for { - v_0 := v.Args[0] if v_0.Op != OpSub64borrow { break } @@ -21140,7 +21359,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // match: (Select1 (AddTupleFirst32 _ tuple)) // result: (Select1 tuple) for { - v_0 := v.Args[0] if v_0.Op != OpS390XAddTupleFirst32 { break } @@ -21152,7 +21370,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // match: (Select1 (AddTupleFirst64 _ tuple)) // result: (Select1 tuple) for { - v_0 := v.Args[0] if v_0.Op != OpS390XAddTupleFirst64 { break } @@ -21165,7 +21382,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // cond: uint64(c+d) >= uint64(c) && c+d == 0 // result: (FlagEQ) for { - v_0 := v.Args[0] if v_0.Op != OpS390XADDCconst { break } @@ -21185,7 +21401,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // cond: uint64(c+d) >= uint64(c) && c+d != 0 // result: (FlagLT) for { - v_0 := v.Args[0] if v_0.Op != OpS390XADDCconst { break } @@ -21205,7 +21420,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // cond: uint64(d) <= uint64(c) && c-d == 0 // result: (FlagGT) for { - v_0 := v.Args[0] if v_0.Op != OpS390XSUBC { break } @@ -21230,7 +21444,6 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { // cond: uint64(d) <= uint64(c) && c-d != 0 // result: (FlagOV) for { - v_0 := v.Args[0] if v_0.Op != OpS390XSUBC { break } @@ -21254,72 +21467,79 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { return false } func rewriteValueS390X_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVHreg) v.AddArg(x) return true } } func rewriteValueS390X_OpSignExt16to64_0(v *Value) bool { + v_0 := 
v.Args[0] // match: (SignExt16to64 x) // result: (MOVHreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVHreg) v.AddArg(x) return true } } func rewriteValueS390X_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt32to64 x) // result: (MOVWreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVWreg) v.AddArg(x) return true } } func rewriteValueS390X_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBreg) v.AddArg(x) return true } } func rewriteValueS390X_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBreg) v.AddArg(x) return true } } func rewriteValueS390X_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to64 x) // result: (MOVBreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBreg) v.AddArg(x) return true } } func rewriteValueS390X_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Slicemask x) // result: (SRADconst (NEG x) [63]) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpS390XSRADconst) v.AuxInt = 63 v0 := b.NewValue0(v.Pos, OpS390XNEG, t) @@ -21329,22 +21549,24 @@ func rewriteValueS390X_OpSlicemask_0(v *Value) bool { } } func rewriteValueS390X_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (FSQRT x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFSQRT) v.AddArg(x) return true } } func rewriteValueS390X_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (CALLstatic [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpS390XCALLstatic) v.AuxInt = argwid v.Aux = target @@ -21353,14 +21575,17 @@ func rewriteValueS390X_OpStaticCall_0(v *Value) bool { } } func rewriteValueS390X_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) // result: (FMOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { break } @@ -21375,9 +21600,9 @@ func rewriteValueS390X_OpStore_0(v *Value) bool { // result: (FMOVSstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { break } @@ -21392,9 +21617,9 @@ func rewriteValueS390X_OpStore_0(v *Value) bool { // result: (MOVDstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8) { break } @@ -21409,9 +21634,9 @@ func rewriteValueS390X_OpStore_0(v *Value) bool { // result: (MOVWstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4) { break } @@ -21426,9 +21651,9 @@ func rewriteValueS390X_OpStore_0(v *Value) bool { // result: (MOVHstore ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -21443,9 +21668,9 @@ func rewriteValueS390X_OpStore_0(v *Value) bool { // result: (MOVBstore ptr val mem) for 
{ t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -21458,11 +21683,13 @@ func rewriteValueS390X_OpStore_0(v *Value) bool { return false } func rewriteValueS390X_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (SUBW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSUBW) v.AddArg(x) v.AddArg(y) @@ -21470,11 +21697,13 @@ func rewriteValueS390X_OpSub16_0(v *Value) bool { } } func rewriteValueS390X_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (SUBW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSUBW) v.AddArg(x) v.AddArg(y) @@ -21482,11 +21711,13 @@ func rewriteValueS390X_OpSub32_0(v *Value) bool { } } func rewriteValueS390X_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (FSUBS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFSUBS) v.AddArg(x) v.AddArg(y) @@ -21494,11 +21725,13 @@ func rewriteValueS390X_OpSub32F_0(v *Value) bool { } } func rewriteValueS390X_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64 x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSUB) v.AddArg(x) v.AddArg(y) @@ -21506,11 +21739,13 @@ func rewriteValueS390X_OpSub64_0(v *Value) bool { } } func rewriteValueS390X_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (FSUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XFSUB) v.AddArg(x) v.AddArg(y) @@ -21518,11 +21753,13 @@ func rewriteValueS390X_OpSub64F_0(v *Value) bool { } } func rewriteValueS390X_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (SUBW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSUBW) v.AddArg(x) v.AddArg(y) @@ -21530,11 +21767,13 @@ func rewriteValueS390X_OpSub8_0(v *Value) bool { } } func rewriteValueS390X_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (SUB x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XSUB) v.AddArg(x) v.AddArg(y) @@ -21542,10 +21781,11 @@ func rewriteValueS390X_OpSubPtr_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc x) // result: (FIDBR [5] x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XFIDBR) v.AuxInt = 5 v.AddArg(x) @@ -21553,10 +21793,11 @@ func rewriteValueS390X_OpTrunc_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21564,10 +21805,11 @@ func rewriteValueS390X_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21575,10 +21817,11 @@ func rewriteValueS390X_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21586,10 +21829,11 @@ func 
rewriteValueS390X_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21597,10 +21841,11 @@ func rewriteValueS390X_OpTrunc64to16_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21608,10 +21853,11 @@ func rewriteValueS390X_OpTrunc64to32_0(v *Value) bool { } } func rewriteValueS390X_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -21619,13 +21865,16 @@ func rewriteValueS390X_OpTrunc64to8_0(v *Value) bool { } } func rewriteValueS390X_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpS390XLoweredWB) v.Aux = fn v.AddArg(destptr) @@ -21635,11 +21884,13 @@ func rewriteValueS390X_OpWB_0(v *Value) bool { } } func rewriteValueS390X_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (XORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XXORW) v.AddArg(x) v.AddArg(y) @@ -21647,11 +21898,13 @@ func rewriteValueS390X_OpXor16_0(v *Value) bool { } } func rewriteValueS390X_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (XORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XXORW) v.AddArg(x) v.AddArg(y) @@ -21659,11 +21912,13 @@ func rewriteValueS390X_OpXor32_0(v *Value) bool { } } func rewriteValueS390X_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor64 x y) // result: (XOR x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XXOR) v.AddArg(x) v.AddArg(y) @@ -21671,11 +21926,13 @@ func rewriteValueS390X_OpXor64_0(v *Value) bool { } } func rewriteValueS390X_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (XORW x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpS390XXORW) v.AddArg(x) v.AddArg(y) @@ -21683,6 +21940,8 @@ func rewriteValueS390X_OpXor8_0(v *Value) bool { } } func rewriteValueS390X_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Zero [0] _ mem) // result: mem @@ -21690,7 +21949,7 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -21702,8 +21961,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -21716,8 +21975,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVHstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -21730,8 +21989,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + 
mem := v_1 v.reset(OpS390XMOVWstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -21744,8 +22003,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVDstoreconst) v.AuxInt = 0 v.AddArg(destptr) @@ -21758,8 +22017,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(0, 2) v.AddArg(destptr) @@ -21776,8 +22035,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVBstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -21794,8 +22053,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVHstoreconst) v.AuxInt = makeValAndOff(0, 4) v.AddArg(destptr) @@ -21812,8 +22071,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpS390XMOVWstoreconst) v.AuxInt = makeValAndOff(0, 3) v.AddArg(destptr) @@ -21829,8 +22088,8 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { // result: (CLEAR [makeValAndOff(s, 0)] destptr mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s > 0 && s <= 1024) { break } @@ -21843,14 +22102,16 @@ func rewriteValueS390X_OpZero_0(v *Value) bool { return false } func rewriteValueS390X_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Zero [s] destptr mem) // cond: s > 1024 // result: (LoweredZero [s%256] destptr (ADDconst destptr [(s/256)*256]) mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s > 1024) { break } @@ -21867,60 +22128,66 @@ func rewriteValueS390X_OpZero_10(v *Value) bool { return false } func rewriteValueS390X_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 x) // result: (MOVHZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVHZreg) v.AddArg(x) return true } } func rewriteValueS390X_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to64 x) // result: (MOVHZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVHZreg) v.AddArg(x) return true } } func rewriteValueS390X_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt32to64 x) // result: (MOVWZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVWZreg) v.AddArg(x) return true } } func rewriteValueS390X_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v.AddArg(x) return true } } func rewriteValueS390X_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v.AddArg(x) return true } } func rewriteValueS390X_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to64 x) // result: (MOVBZreg x) for { - x := v.Args[0] + x := v_0 v.reset(OpS390XMOVBZreg) v.AddArg(x) return true diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index a7f3f39753..5993e9bed1 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -578,21 +578,24 @@ func rewriteValueWasm(v *Value) bool {
 	return false
 }
 func rewriteValueWasm_OpAbs_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Abs x)
 	// result: (F64Abs x)
 	for {
-		x := v.Args[0]
+		x := v_0
 		v.reset(OpWasmF64Abs)
 		v.AddArg(x)
 		return true
 	}
 }
 func rewriteValueWasm_OpAdd16_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add16 x y)
 	// result: (I64Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -600,11 +603,13 @@ func rewriteValueWasm_OpAdd16_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAdd32_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add32 x y)
 	// result: (I64Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -612,11 +617,13 @@ func rewriteValueWasm_OpAdd32_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAdd32F_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add32F x y)
 	// result: (F32Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmF32Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -624,11 +631,13 @@ func rewriteValueWasm_OpAdd32F_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAdd64_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add64 x y)
 	// result: (I64Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -636,11 +645,13 @@ func rewriteValueWasm_OpAdd64_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAdd64F_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add64F x y)
 	// result: (F64Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmF64Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -648,11 +659,13 @@ func rewriteValueWasm_OpAdd64F_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAdd8_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Add8 x y)
 	// result: (I64Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -660,11 +673,13 @@ func rewriteValueWasm_OpAdd8_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAddPtr_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (AddPtr x y)
 	// result: (I64Add x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64Add)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -672,11 +687,12 @@ func rewriteValueWasm_OpAddPtr_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAddr_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Addr {sym} base)
 	// result: (LoweredAddr {sym} base)
 	for {
 		sym := v.Aux
-		base := v.Args[0]
+		base := v_0
 		v.reset(OpWasmLoweredAddr)
 		v.Aux = sym
 		v.AddArg(base)
@@ -684,11 +700,13 @@ func rewriteValueWasm_OpAddr_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAnd16_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (And16 x y)
 	// result: (I64And x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64And)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -696,11 +714,13 @@ func rewriteValueWasm_OpAnd16_0(v *Value) bool {
 	}
 }
 func rewriteValueWasm_OpAnd32_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (And32 x y)
 	// result: (I64And x y)
 	for {
-		y := v.Args[1]
-		x := v.Args[0]
+		x := v_0
+		y := v_1
 		v.reset(OpWasmI64And)
 		v.AddArg(x)
 		v.AddArg(y)
@@ -708,11 +728,13 @@ func
rewriteValueWasm_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And64 x y) // result: (I64And x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64And) v.AddArg(x) v.AddArg(y) @@ -720,11 +742,13 @@ func rewriteValueWasm_OpAnd64_0(v *Value) bool { } } func rewriteValueWasm_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (And8 x y) // result: (I64And x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64And) v.AddArg(x) v.AddArg(y) @@ -732,11 +756,13 @@ func rewriteValueWasm_OpAnd8_0(v *Value) bool { } } func rewriteValueWasm_OpAndB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AndB x y) // result: (I64And x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64And) v.AddArg(x) v.AddArg(y) @@ -744,12 +770,13 @@ func rewriteValueWasm_OpAndB_0(v *Value) bool { } } func rewriteValueWasm_OpBitLen64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) // result: (I64Sub (I64Const [64]) (I64Clz x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 64 @@ -761,23 +788,27 @@ func rewriteValueWasm_OpBitLen64_0(v *Value) bool { } } func rewriteValueWasm_OpCeil_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ceil x) // result: (F64Ceil x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64Ceil) v.AddArg(x) return true } } func rewriteValueWasm_OpClosureCall_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (ClosureCall [argwid] entry closure mem) // result: (LoweredClosureCall [argwid] entry closure mem) for { argwid := v.AuxInt - mem := v.Args[2] - entry := v.Args[0] - closure := v.Args[1] + entry := v_0 + closure := v_1 + mem := v_2 v.reset(OpWasmLoweredClosureCall) v.AuxInt = argwid v.AddArg(entry) @@ -787,12 +818,13 @@ func rewriteValueWasm_OpClosureCall_0(v *Value) bool { } } func rewriteValueWasm_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com16 x) // result: (I64Xor x (I64Const [-1])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Xor) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -802,12 +834,13 @@ func rewriteValueWasm_OpCom16_0(v *Value) bool { } } func rewriteValueWasm_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com32 x) // result: (I64Xor x (I64Const [-1])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Xor) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -817,12 +850,13 @@ func rewriteValueWasm_OpCom32_0(v *Value) bool { } } func rewriteValueWasm_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com64 x) // result: (I64Xor x (I64Const [-1])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Xor) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -832,12 +866,13 @@ func rewriteValueWasm_OpCom64_0(v *Value) bool { } } func rewriteValueWasm_OpCom8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com8 x) // result: (I64Xor x (I64Const [-1])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Xor) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -847,13 +882,16 @@ func rewriteValueWasm_OpCom8_0(v *Value) bool { } } func rewriteValueWasm_OpCondSelect_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] 
+ v_0 := v.Args[0] // match: (CondSelect x y cond) // result: (Select x y cond) for { t := v.Type - cond := v.Args[2] - x := v.Args[0] - y := v.Args[1] + x := v_0 + y := v_1 + cond := v_2 v.reset(OpWasmSelect) v.Type = t v.AddArg(x) @@ -942,12 +980,14 @@ func rewriteValueWasm_OpConstNil_0(v *Value) bool { } } func rewriteValueWasm_OpConvert_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Convert x mem) // result: (LoweredConvert x mem) for { t := v.Type - mem := v.Args[1] - x := v.Args[0] + x := v_0 + mem := v_1 v.reset(OpWasmLoweredConvert) v.Type = t v.AddArg(x) @@ -956,11 +996,13 @@ func rewriteValueWasm_OpConvert_0(v *Value) bool { } } func rewriteValueWasm_OpCopysign_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Copysign x y) // result: (F64Copysign x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Copysign) v.AddArg(x) v.AddArg(y) @@ -968,12 +1010,13 @@ func rewriteValueWasm_OpCopysign_0(v *Value) bool { } } func rewriteValueWasm_OpCtz16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) // result: (I64Ctz (I64Or x (I64Const [0x10000]))) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) v0.AddArg(x) @@ -985,22 +1028,24 @@ func rewriteValueWasm_OpCtz16_0(v *Value) bool { } } func rewriteValueWasm_OpCtz16NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz16NonZero x) // result: (I64Ctz x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v.AddArg(x) return true } } func rewriteValueWasm_OpCtz32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz32 x) // result: (I64Ctz (I64Or x (I64Const [0x100000000]))) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) v0.AddArg(x) @@ -1012,42 +1057,46 @@ func rewriteValueWasm_OpCtz32_0(v *Value) bool { } } func rewriteValueWasm_OpCtz32NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz32NonZero x) // result: (I64Ctz x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v.AddArg(x) return true } } func rewriteValueWasm_OpCtz64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz64 x) // result: (I64Ctz x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v.AddArg(x) return true } } func rewriteValueWasm_OpCtz64NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz64NonZero x) // result: (I64Ctz x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v.AddArg(x) return true } } func rewriteValueWasm_OpCtz8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) // result: (I64Ctz (I64Or x (I64Const [0x100]))) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) v0.AddArg(x) @@ -1059,72 +1108,79 @@ func rewriteValueWasm_OpCtz8_0(v *Value) bool { } } func rewriteValueWasm_OpCtz8NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz8NonZero x) // result: (I64Ctz x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Ctz) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt32Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32 x) // result: (I64TruncSatF32S x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF32S) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt32Fto32U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto32U x) // result: (I64TruncSatF32U x) for { - x := v.Args[0] + x := v_0 
v.reset(OpWasmI64TruncSatF32U) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt32Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64 x) // result: (I64TruncSatF32S x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF32S) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt32Fto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64F x) // result: (F64PromoteF32 x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64PromoteF32) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt32Fto64U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt32Fto64U x) // result: (I64TruncSatF32U x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF32U) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt32Uto32F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32Uto32F x) // result: (F32ConvertI64U (ZeroExt32to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF32ConvertI64U) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1133,12 +1189,13 @@ func rewriteValueWasm_OpCvt32Uto32F_0(v *Value) bool { } } func rewriteValueWasm_OpCvt32Uto64F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32Uto64F x) // result: (F64ConvertI64U (ZeroExt32to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64ConvertI64U) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1147,12 +1204,13 @@ func rewriteValueWasm_OpCvt32Uto64F_0(v *Value) bool { } } func rewriteValueWasm_OpCvt32to32F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32to32F x) // result: (F32ConvertI64S (SignExt32to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF32ConvertI64S) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1161,12 +1219,13 @@ func rewriteValueWasm_OpCvt32to32F_0(v *Value) bool { } } func rewriteValueWasm_OpCvt32to64F_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Cvt32to64F x) // result: (F64ConvertI64S (SignExt32to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64ConvertI64S) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1175,103 +1234,114 @@ func rewriteValueWasm_OpCvt32to64F_0(v *Value) bool { } } func rewriteValueWasm_OpCvt64Fto32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32 x) // result: (I64TruncSatF64S x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF64S) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64Fto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32F x) // result: (F32DemoteF64 x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF32DemoteF64) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64Fto32U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto32U x) // result: (I64TruncSatF64U x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF64U) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64Fto64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64 x) // result: (I64TruncSatF64S x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF64S) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64Fto64U_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Fto64U x) // result: (I64TruncSatF64U x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64TruncSatF64U) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64Uto32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Uto32F x) // result: 
(F32ConvertI64U x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF32ConvertI64U) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64Uto64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64Uto64F x) // result: (F64ConvertI64U x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64ConvertI64U) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64to32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to32F x) // result: (F32ConvertI64S x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF32ConvertI64S) v.AddArg(x) return true } } func rewriteValueWasm_OpCvt64to64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Cvt64to64F x) // result: (F64ConvertI64S x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64ConvertI64S) v.AddArg(x) return true } } func rewriteValueWasm_OpDiv16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16 x y) // result: (I64DivS (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -1283,13 +1353,15 @@ func rewriteValueWasm_OpDiv16_0(v *Value) bool { } } func rewriteValueWasm_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div16u x y) // result: (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1301,13 +1373,15 @@ func rewriteValueWasm_OpDiv16u_0(v *Value) bool { } } func rewriteValueWasm_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32 x y) // result: (I64DivS (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1319,11 +1393,13 @@ func rewriteValueWasm_OpDiv32_0(v *Value) bool { } } func rewriteValueWasm_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div32F x y) // result: (F32Div x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Div) v.AddArg(x) v.AddArg(y) @@ -1331,13 +1407,15 @@ func rewriteValueWasm_OpDiv32F_0(v *Value) bool { } } func rewriteValueWasm_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div32u x y) // result: (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1349,11 +1427,13 @@ func rewriteValueWasm_OpDiv32u_0(v *Value) bool { } } func rewriteValueWasm_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64 x y) // result: (I64DivS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivS) v.AddArg(x) v.AddArg(y) @@ -1361,11 +1441,13 @@ func rewriteValueWasm_OpDiv64_0(v *Value) bool { } } func rewriteValueWasm_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Div64F x y) // result: (F64Div x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Div) v.AddArg(x) v.AddArg(y) @@ -1373,11 +1455,13 @@ func rewriteValueWasm_OpDiv64F_0(v *Value) bool { } } func rewriteValueWasm_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + 
v_0 := v.Args[0] // match: (Div64u x y) // result: (I64DivU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivU) v.AddArg(x) v.AddArg(y) @@ -1385,13 +1469,15 @@ func rewriteValueWasm_OpDiv64u_0(v *Value) bool { } } func rewriteValueWasm_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 x y) // result: (I64DivS (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -1403,13 +1489,15 @@ func rewriteValueWasm_OpDiv8_0(v *Value) bool { } } func rewriteValueWasm_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u x y) // result: (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64DivU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -1421,13 +1509,15 @@ func rewriteValueWasm_OpDiv8u_0(v *Value) bool { } } func rewriteValueWasm_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 x y) // result: (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1439,13 +1529,15 @@ func rewriteValueWasm_OpEq16_0(v *Value) bool { } } func rewriteValueWasm_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x y) // result: (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1457,11 +1549,13 @@ func rewriteValueWasm_OpEq32_0(v *Value) bool { } } func rewriteValueWasm_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq32F x y) // result: (F32Eq x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Eq) v.AddArg(x) v.AddArg(y) @@ -1469,11 +1563,13 @@ func rewriteValueWasm_OpEq32F_0(v *Value) bool { } } func rewriteValueWasm_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq64 x y) // result: (I64Eq x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Eq) v.AddArg(x) v.AddArg(y) @@ -1481,11 +1577,13 @@ func rewriteValueWasm_OpEq64_0(v *Value) bool { } } func rewriteValueWasm_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq64F x y) // result: (F64Eq x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Eq) v.AddArg(x) v.AddArg(y) @@ -1493,13 +1591,15 @@ func rewriteValueWasm_OpEq64F_0(v *Value) bool { } } func rewriteValueWasm_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq8 x y) // result: (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -1511,11 +1611,13 @@ func rewriteValueWasm_OpEq8_0(v *Value) bool { } } func rewriteValueWasm_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EqB x y) // result: (I64Eq x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Eq) 
v.AddArg(x) v.AddArg(y) @@ -1523,11 +1625,13 @@ func rewriteValueWasm_OpEqB_0(v *Value) bool { } } func rewriteValueWasm_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EqPtr x y) // result: (I64Eq x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Eq) v.AddArg(x) v.AddArg(y) @@ -1535,23 +1639,26 @@ func rewriteValueWasm_OpEqPtr_0(v *Value) bool { } } func rewriteValueWasm_OpFloor_0(v *Value) bool { + v_0 := v.Args[0] // match: (Floor x) // result: (F64Floor x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64Floor) v.AddArg(x) return true } } func rewriteValueWasm_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16 x y) // result: (I64GeS (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -1563,13 +1670,15 @@ func rewriteValueWasm_OpGeq16_0(v *Value) bool { } } func rewriteValueWasm_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq16U x y) // result: (I64GeU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1581,13 +1690,15 @@ func rewriteValueWasm_OpGeq16U_0(v *Value) bool { } } func rewriteValueWasm_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32 x y) // result: (I64GeS (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1599,11 +1710,13 @@ func rewriteValueWasm_OpGeq32_0(v *Value) bool { } } func rewriteValueWasm_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq32F x y) // result: (F32Ge x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Ge) v.AddArg(x) v.AddArg(y) @@ -1611,13 +1724,15 @@ func rewriteValueWasm_OpGeq32F_0(v *Value) bool { } } func rewriteValueWasm_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq32U x y) // result: (I64GeU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1629,11 +1744,13 @@ func rewriteValueWasm_OpGeq32U_0(v *Value) bool { } } func rewriteValueWasm_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64 x y) // result: (I64GeS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeS) v.AddArg(x) v.AddArg(y) @@ -1641,11 +1758,13 @@ func rewriteValueWasm_OpGeq64_0(v *Value) bool { } } func rewriteValueWasm_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64F x y) // result: (F64Ge x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Ge) v.AddArg(x) v.AddArg(y) @@ -1653,11 +1772,13 @@ func rewriteValueWasm_OpGeq64F_0(v *Value) bool { } } func rewriteValueWasm_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64U x y) // result: (I64GeU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeU) v.AddArg(x) v.AddArg(y) @@ 
-1665,13 +1786,15 @@ func rewriteValueWasm_OpGeq64U_0(v *Value) bool { } } func rewriteValueWasm_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8 x y) // result: (I64GeS (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -1683,13 +1806,15 @@ func rewriteValueWasm_OpGeq8_0(v *Value) bool { } } func rewriteValueWasm_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq8U x y) // result: (I64GeU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GeU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -1725,13 +1850,15 @@ func rewriteValueWasm_OpGetClosurePtr_0(v *Value) bool { } } func rewriteValueWasm_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16 x y) // result: (I64GtS (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -1743,13 +1870,15 @@ func rewriteValueWasm_OpGreater16_0(v *Value) bool { } } func rewriteValueWasm_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater16U x y) // result: (I64GtU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1761,13 +1890,15 @@ func rewriteValueWasm_OpGreater16U_0(v *Value) bool { } } func rewriteValueWasm_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32 x y) // result: (I64GtS (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1779,11 +1910,13 @@ func rewriteValueWasm_OpGreater32_0(v *Value) bool { } } func rewriteValueWasm_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32F x y) // result: (F32Gt x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Gt) v.AddArg(x) v.AddArg(y) @@ -1791,13 +1924,15 @@ func rewriteValueWasm_OpGreater32F_0(v *Value) bool { } } func rewriteValueWasm_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater32U x y) // result: (I64GtU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -1809,11 +1944,13 @@ func rewriteValueWasm_OpGreater32U_0(v *Value) bool { } } func rewriteValueWasm_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64 x y) // result: (I64GtS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtS) v.AddArg(x) v.AddArg(y) @@ -1821,11 +1958,13 @@ func rewriteValueWasm_OpGreater64_0(v *Value) bool { } } func rewriteValueWasm_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64F x y) // result: (F64Gt x y) for { - y := v.Args[1] 
- x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Gt) v.AddArg(x) v.AddArg(y) @@ -1833,11 +1972,13 @@ func rewriteValueWasm_OpGreater64F_0(v *Value) bool { } } func rewriteValueWasm_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64U x y) // result: (I64GtU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtU) v.AddArg(x) v.AddArg(y) @@ -1845,13 +1986,15 @@ func rewriteValueWasm_OpGreater64U_0(v *Value) bool { } } func rewriteValueWasm_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8 x y) // result: (I64GtS (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -1863,13 +2006,15 @@ func rewriteValueWasm_OpGreater8_0(v *Value) bool { } } func rewriteValueWasm_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater8U x y) // result: (I64GtU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64GtU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -1881,12 +2026,14 @@ func rewriteValueWasm_OpGreater8U_0(v *Value) bool { } } func rewriteValueWasm_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argwid] entry mem) // result: (LoweredInterCall [argwid] entry mem) for { argwid := v.AuxInt - mem := v.Args[1] - entry := v.Args[0] + entry := v_0 + mem := v_1 v.reset(OpWasmLoweredInterCall) v.AuxInt = argwid v.AddArg(entry) @@ -1895,11 +2042,13 @@ func rewriteValueWasm_OpInterCall_0(v *Value) bool { } } func rewriteValueWasm_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsInBounds idx len) // result: (I64LtU idx len) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpWasmI64LtU) v.AddArg(idx) v.AddArg(len) @@ -1907,12 +2056,13 @@ func rewriteValueWasm_OpIsInBounds_0(v *Value) bool { } } func rewriteValueWasm_OpIsNonNil_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (IsNonNil p) // result: (I64Eqz (I64Eqz p)) for { - p := v.Args[0] + p := v_0 v.reset(OpWasmI64Eqz) v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool) v0.AddArg(p) @@ -1921,11 +2071,13 @@ func rewriteValueWasm_OpIsNonNil_0(v *Value) bool { } } func rewriteValueWasm_OpIsSliceInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsSliceInBounds idx len) // result: (I64LeU idx len) for { - len := v.Args[1] - idx := v.Args[0] + idx := v_0 + len := v_1 v.reset(OpWasmI64LeU) v.AddArg(idx) v.AddArg(len) @@ -1933,13 +2085,15 @@ func rewriteValueWasm_OpIsSliceInBounds_0(v *Value) bool { } } func rewriteValueWasm_OpLeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16 x y) // result: (I64LeS (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -1951,13 +2105,15 @@ func rewriteValueWasm_OpLeq16_0(v *Value) bool { } } func rewriteValueWasm_OpLeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq16U x y) // result: (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x 
:= v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -1969,13 +2125,15 @@ func rewriteValueWasm_OpLeq16U_0(v *Value) bool { } } func rewriteValueWasm_OpLeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32 x y) // result: (I64LeS (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -1987,11 +2145,13 @@ func rewriteValueWasm_OpLeq32_0(v *Value) bool { } } func rewriteValueWasm_OpLeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Leq32F x y) // result: (F32Le x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Le) v.AddArg(x) v.AddArg(y) @@ -1999,13 +2159,15 @@ func rewriteValueWasm_OpLeq32F_0(v *Value) bool { } } func rewriteValueWasm_OpLeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq32U x y) // result: (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -2017,11 +2179,13 @@ func rewriteValueWasm_OpLeq32U_0(v *Value) bool { } } func rewriteValueWasm_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Leq64 x y) // result: (I64LeS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeS) v.AddArg(x) v.AddArg(y) @@ -2029,11 +2193,13 @@ func rewriteValueWasm_OpLeq64_0(v *Value) bool { } } func rewriteValueWasm_OpLeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Leq64F x y) // result: (F64Le x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Le) v.AddArg(x) v.AddArg(y) @@ -2041,11 +2207,13 @@ func rewriteValueWasm_OpLeq64F_0(v *Value) bool { } } func rewriteValueWasm_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Leq64U x y) // result: (I64LeU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeU) v.AddArg(x) v.AddArg(y) @@ -2053,13 +2221,15 @@ func rewriteValueWasm_OpLeq64U_0(v *Value) bool { } } func rewriteValueWasm_OpLeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8 x y) // result: (I64LeS (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -2071,13 +2241,15 @@ func rewriteValueWasm_OpLeq8_0(v *Value) bool { } } func rewriteValueWasm_OpLeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq8U x y) // result: (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LeU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -2089,13 +2261,15 @@ func rewriteValueWasm_OpLeq8U_0(v *Value) bool { } } func rewriteValueWasm_OpLess16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16 x y) // result: (I64LtS (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) 
v0.AddArg(x) @@ -2107,13 +2281,15 @@ func rewriteValueWasm_OpLess16_0(v *Value) bool { } } func rewriteValueWasm_OpLess16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less16U x y) // result: (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -2125,13 +2301,15 @@ func rewriteValueWasm_OpLess16U_0(v *Value) bool { } } func rewriteValueWasm_OpLess32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32 x y) // result: (I64LtS (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -2143,11 +2321,13 @@ func rewriteValueWasm_OpLess32_0(v *Value) bool { } } func rewriteValueWasm_OpLess32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less32F x y) // result: (F32Lt x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Lt) v.AddArg(x) v.AddArg(y) @@ -2155,13 +2335,15 @@ func rewriteValueWasm_OpLess32F_0(v *Value) bool { } } func rewriteValueWasm_OpLess32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less32U x y) // result: (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -2173,11 +2355,13 @@ func rewriteValueWasm_OpLess32U_0(v *Value) bool { } } func rewriteValueWasm_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64 x y) // result: (I64LtS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtS) v.AddArg(x) v.AddArg(y) @@ -2185,11 +2369,13 @@ func rewriteValueWasm_OpLess64_0(v *Value) bool { } } func rewriteValueWasm_OpLess64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64F x y) // result: (F64Lt x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Lt) v.AddArg(x) v.AddArg(y) @@ -2197,11 +2383,13 @@ func rewriteValueWasm_OpLess64F_0(v *Value) bool { } } func rewriteValueWasm_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Less64U x y) // result: (I64LtU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtU) v.AddArg(x) v.AddArg(y) @@ -2209,13 +2397,15 @@ func rewriteValueWasm_OpLess64U_0(v *Value) bool { } } func rewriteValueWasm_OpLess8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8 x y) // result: (I64LtS (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -2227,13 +2417,15 @@ func rewriteValueWasm_OpLess8_0(v *Value) bool { } } func rewriteValueWasm_OpLess8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less8U x y) // result: (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64LtU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -2245,13 +2437,15 @@ func rewriteValueWasm_OpLess8U_0(v *Value) bool 
{ } } func rewriteValueWasm_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Load ptr mem) // cond: is32BitFloat(t) // result: (F32Load ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is32BitFloat(t)) { break } @@ -2265,8 +2459,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (F64Load ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitFloat(t)) { break } @@ -2280,8 +2474,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 8) { break } @@ -2295,8 +2489,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load32U ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 4 && !t.IsSigned()) { break } @@ -2310,8 +2504,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load32S ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 4 && t.IsSigned()) { break } @@ -2325,8 +2519,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load16U ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 2 && !t.IsSigned()) { break } @@ -2340,8 +2534,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load16S ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 2 && t.IsSigned()) { break } @@ -2355,8 +2549,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load8U ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 1 && !t.IsSigned()) { break } @@ -2370,8 +2564,8 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { // result: (I64Load8S ptr mem) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.Size() == 1 && t.IsSigned()) { break } @@ -2383,12 +2577,12 @@ func rewriteValueWasm_OpLoad_0(v *Value) bool { return false } func rewriteValueWasm_OpLocalAddr_0(v *Value) bool { + v_0 := v.Args[0] // match: (LocalAddr {sym} base _) // result: (LoweredAddr {sym} base) for { sym := v.Aux - _ = v.Args[1] - base := v.Args[0] + base := v_0 v.reset(OpWasmLoweredAddr) v.Aux = sym v.AddArg(base) @@ -2396,13 +2590,15 @@ func rewriteValueWasm_OpLocalAddr_0(v *Value) bool { } } func rewriteValueWasm_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) // result: (Lsh64x64 x (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2412,13 +2608,15 @@ func rewriteValueWasm_OpLsh16x16_0(v *Value) bool { } } func rewriteValueWasm_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) // result: (Lsh64x64 x (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -2428,11 +2626,13 @@ func rewriteValueWasm_OpLsh16x32_0(v *Value) bool { } } func rewriteValueWasm_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh16x64 x y) // result: (Lsh64x64 x y) for { - y := v.Args[1] - x := 
v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v.AddArg(y) @@ -2440,13 +2640,15 @@ func rewriteValueWasm_OpLsh16x64_0(v *Value) bool { } } func rewriteValueWasm_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) // result: (Lsh64x64 x (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -2456,13 +2658,15 @@ func rewriteValueWasm_OpLsh16x8_0(v *Value) bool { } } func rewriteValueWasm_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) // result: (Lsh64x64 x (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2472,13 +2676,15 @@ func rewriteValueWasm_OpLsh32x16_0(v *Value) bool { } } func rewriteValueWasm_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) // result: (Lsh64x64 x (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -2488,11 +2694,13 @@ func rewriteValueWasm_OpLsh32x32_0(v *Value) bool { } } func rewriteValueWasm_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh32x64 x y) // result: (Lsh64x64 x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v.AddArg(y) @@ -2500,13 +2708,15 @@ func rewriteValueWasm_OpLsh32x64_0(v *Value) bool { } } func rewriteValueWasm_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) // result: (Lsh64x64 x (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -2516,13 +2726,15 @@ func rewriteValueWasm_OpLsh32x8_0(v *Value) bool { } } func rewriteValueWasm_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) // result: (Lsh64x64 x (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2532,13 +2744,15 @@ func rewriteValueWasm_OpLsh64x16_0(v *Value) bool { } } func rewriteValueWasm_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) // result: (Lsh64x64 x (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -2548,14 +2762,16 @@ func rewriteValueWasm_OpLsh64x32_0(v *Value) bool { } } func rewriteValueWasm_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x64 x y) // cond: shiftIsBounded(v) // result: (I64Shl x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -2568,9 +2784,7 @@ func rewriteValueWasm_OpLsh64x64_0(v *Value) bool { // cond: uint64(c) < 64 // result: (I64Shl x (I64Const [c])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != 
OpWasmI64Const { break } @@ -2589,8 +2803,6 @@ func rewriteValueWasm_OpLsh64x64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (I64Const [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -2605,8 +2817,8 @@ func rewriteValueWasm_OpLsh64x64_0(v *Value) bool { // match: (Lsh64x64 x y) // result: (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmSelect) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -2625,13 +2837,15 @@ func rewriteValueWasm_OpLsh64x64_0(v *Value) bool { } } func rewriteValueWasm_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) // result: (Lsh64x64 x (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -2641,13 +2855,15 @@ func rewriteValueWasm_OpLsh64x8_0(v *Value) bool { } } func rewriteValueWasm_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) // result: (Lsh64x64 x (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2657,13 +2873,15 @@ func rewriteValueWasm_OpLsh8x16_0(v *Value) bool { } } func rewriteValueWasm_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) // result: (Lsh64x64 x (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -2673,11 +2891,13 @@ func rewriteValueWasm_OpLsh8x32_0(v *Value) bool { } } func rewriteValueWasm_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Lsh8x64 x y) // result: (Lsh64x64 x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v.AddArg(y) @@ -2685,13 +2905,15 @@ func rewriteValueWasm_OpLsh8x64_0(v *Value) bool { } } func rewriteValueWasm_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) // result: (Lsh64x64 x (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -2701,13 +2923,15 @@ func rewriteValueWasm_OpLsh8x8_0(v *Value) bool { } } func rewriteValueWasm_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16 x y) // result: (I64RemS (SignExt16to64 x) (SignExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -2719,13 +2943,15 @@ func rewriteValueWasm_OpMod16_0(v *Value) bool { } } func rewriteValueWasm_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod16u x y) // result: (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -2737,13 +2963,15 @@ func rewriteValueWasm_OpMod16u_0(v *Value) bool { } } func rewriteValueWasm_OpMod32_0(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) // result: (I64RemS (SignExt32to64 x) (SignExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -2755,13 +2983,15 @@ func rewriteValueWasm_OpMod32_0(v *Value) bool { } } func rewriteValueWasm_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) // result: (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -2773,11 +3003,13 @@ func rewriteValueWasm_OpMod32u_0(v *Value) bool { } } func rewriteValueWasm_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64 x y) // result: (I64RemS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemS) v.AddArg(x) v.AddArg(y) @@ -2785,11 +3017,13 @@ func rewriteValueWasm_OpMod64_0(v *Value) bool { } } func rewriteValueWasm_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mod64u x y) // result: (I64RemU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemU) v.AddArg(x) v.AddArg(y) @@ -2797,13 +3031,15 @@ func rewriteValueWasm_OpMod64u_0(v *Value) bool { } } func rewriteValueWasm_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8 x y) // result: (I64RemS (SignExt8to64 x) (SignExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemS) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -2815,13 +3051,15 @@ func rewriteValueWasm_OpMod8_0(v *Value) bool { } } func rewriteValueWasm_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mod8u x y) // result: (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64RemU) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -2833,6 +3071,9 @@ func rewriteValueWasm_OpMod8u_0(v *Value) bool { } } func rewriteValueWasm_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [0] _ _ mem) @@ -2841,7 +3082,7 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[2] + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -2853,9 +3094,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store8) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) @@ -2871,9 +3112,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store16) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) @@ -2889,9 +3130,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store32) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) @@ 
-2907,9 +3148,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 8 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) @@ -2925,9 +3166,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store) v.AuxInt = 8 v.AddArg(dst) @@ -2952,9 +3193,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store8) v.AuxInt = 2 v.AddArg(dst) @@ -2979,9 +3220,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store8) v.AuxInt = 4 v.AddArg(dst) @@ -3006,9 +3247,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store16) v.AuxInt = 4 v.AddArg(dst) @@ -3033,9 +3274,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 v.reset(OpWasmI64Store32) v.AuxInt = 3 v.AddArg(dst) @@ -3057,6 +3298,9 @@ func rewriteValueWasm_OpMove_0(v *Value) bool { return false } func rewriteValueWasm_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Move [s] dst src mem) @@ -3064,9 +3308,9 @@ func rewriteValueWasm_OpMove_10(v *Value) bool { // result: (I64Store [s-8] dst (I64Load [s-8] src mem) (I64Store dst (I64Load src mem) mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 8 && s < 16) { break } @@ -3093,9 +3337,9 @@ func rewriteValueWasm_OpMove_10(v *Value) bool { // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (I64Store dst (I64Load src mem) mem)) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 16 && s%16 != 0 && s%16 <= 8) { break } @@ -3124,9 +3368,9 @@ func rewriteValueWasm_OpMove_10(v *Value) bool { // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem))) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s > 16 && s%16 != 0 && s%16 > 8) { break } @@ -3164,9 +3408,9 @@ func rewriteValueWasm_OpMove_10(v *Value) bool { // result: (LoweredMove [s/8] dst src mem) for { s := v.AuxInt - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(s%8 == 0) { break } @@ -3180,11 +3424,13 @@ func rewriteValueWasm_OpMove_10(v *Value) bool { return false } func rewriteValueWasm_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul16 x y) // result: (I64Mul x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Mul) v.AddArg(x) v.AddArg(y) @@ -3192,11 +3438,13 @@ func rewriteValueWasm_OpMul16_0(v *Value) bool { } } func rewriteValueWasm_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // 
match: (Mul32 x y) // result: (I64Mul x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Mul) v.AddArg(x) v.AddArg(y) @@ -3204,11 +3452,13 @@ func rewriteValueWasm_OpMul32_0(v *Value) bool { } } func rewriteValueWasm_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F x y) // result: (F32Mul x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Mul) v.AddArg(x) v.AddArg(y) @@ -3216,11 +3466,13 @@ func rewriteValueWasm_OpMul32F_0(v *Value) bool { } } func rewriteValueWasm_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64 x y) // result: (I64Mul x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Mul) v.AddArg(x) v.AddArg(y) @@ -3228,11 +3480,13 @@ func rewriteValueWasm_OpMul64_0(v *Value) bool { } } func rewriteValueWasm_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F x y) // result: (F64Mul x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Mul) v.AddArg(x) v.AddArg(y) @@ -3240,11 +3494,13 @@ func rewriteValueWasm_OpMul64F_0(v *Value) bool { } } func rewriteValueWasm_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul8 x y) // result: (I64Mul x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Mul) v.AddArg(x) v.AddArg(y) @@ -3252,12 +3508,13 @@ func rewriteValueWasm_OpMul8_0(v *Value) bool { } } func rewriteValueWasm_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg16 x) // result: (I64Sub (I64Const [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 @@ -3267,12 +3524,13 @@ func rewriteValueWasm_OpNeg16_0(v *Value) bool { } } func rewriteValueWasm_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg32 x) // result: (I64Sub (I64Const [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 @@ -3282,22 +3540,24 @@ func rewriteValueWasm_OpNeg32_0(v *Value) bool { } } func rewriteValueWasm_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F x) // result: (F32Neg x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF32Neg) v.AddArg(x) return true } } func rewriteValueWasm_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg64 x) // result: (I64Sub (I64Const [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 @@ -3307,22 +3567,24 @@ func rewriteValueWasm_OpNeg64_0(v *Value) bool { } } func rewriteValueWasm_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F x) // result: (F64Neg x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64Neg) v.AddArg(x) return true } } func rewriteValueWasm_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neg8 x) // result: (I64Sub (I64Const [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Sub) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = 0 @@ -3332,13 +3594,15 @@ func rewriteValueWasm_OpNeg8_0(v *Value) bool { } } func rewriteValueWasm_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x y) // result: (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 
y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -3350,13 +3614,15 @@ func rewriteValueWasm_OpNeq16_0(v *Value) bool { } } func rewriteValueWasm_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32 x y) // result: (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -3368,11 +3634,13 @@ func rewriteValueWasm_OpNeq32_0(v *Value) bool { } } func rewriteValueWasm_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq32F x y) // result: (F32Ne x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Ne) v.AddArg(x) v.AddArg(y) @@ -3380,11 +3648,13 @@ func rewriteValueWasm_OpNeq32F_0(v *Value) bool { } } func rewriteValueWasm_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq64 x y) // result: (I64Ne x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Ne) v.AddArg(x) v.AddArg(y) @@ -3392,11 +3662,13 @@ func rewriteValueWasm_OpNeq64_0(v *Value) bool { } } func rewriteValueWasm_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq64F x y) // result: (F64Ne x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Ne) v.AddArg(x) v.AddArg(y) @@ -3404,13 +3676,15 @@ func rewriteValueWasm_OpNeq64F_0(v *Value) bool { } } func rewriteValueWasm_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x y) // result: (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -3422,11 +3696,13 @@ func rewriteValueWasm_OpNeq8_0(v *Value) bool { } } func rewriteValueWasm_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB x y) // result: (I64Ne x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Ne) v.AddArg(x) v.AddArg(y) @@ -3434,11 +3710,13 @@ func rewriteValueWasm_OpNeqB_0(v *Value) bool { } } func rewriteValueWasm_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqPtr x y) // result: (I64Ne x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Ne) v.AddArg(x) v.AddArg(y) @@ -3446,11 +3724,13 @@ func rewriteValueWasm_OpNeqPtr_0(v *Value) bool { } } func rewriteValueWasm_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NilCheck ptr mem) // result: (LoweredNilCheck ptr mem) for { - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 v.reset(OpWasmLoweredNilCheck) v.AddArg(ptr) v.AddArg(mem) @@ -3458,21 +3738,23 @@ func rewriteValueWasm_OpNilCheck_0(v *Value) bool { } } func rewriteValueWasm_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not x) // result: (I64Eqz x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Eqz) v.AddArg(x) return true } } func rewriteValueWasm_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr [off] ptr) // result: (I64AddConst [off] ptr) for { off := v.AuxInt - ptr := v.Args[0] + ptr := v_0 v.reset(OpWasmI64AddConst) v.AuxInt = off v.AddArg(ptr) @@ -3480,11 +3762,13 @@ func rewriteValueWasm_OpOffPtr_0(v *Value) bool { } } 
func rewriteValueWasm_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or16 x y) // result: (I64Or x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Or) v.AddArg(x) v.AddArg(y) @@ -3492,11 +3776,13 @@ func rewriteValueWasm_OpOr16_0(v *Value) bool { } } func rewriteValueWasm_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or32 x y) // result: (I64Or x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Or) v.AddArg(x) v.AddArg(y) @@ -3504,11 +3790,13 @@ func rewriteValueWasm_OpOr32_0(v *Value) bool { } } func rewriteValueWasm_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or64 x y) // result: (I64Or x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Or) v.AddArg(x) v.AddArg(y) @@ -3516,11 +3804,13 @@ func rewriteValueWasm_OpOr64_0(v *Value) bool { } } func rewriteValueWasm_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Or8 x y) // result: (I64Or x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Or) v.AddArg(x) v.AddArg(y) @@ -3528,11 +3818,13 @@ func rewriteValueWasm_OpOr8_0(v *Value) bool { } } func rewriteValueWasm_OpOrB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (OrB x y) // result: (I64Or x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Or) v.AddArg(x) v.AddArg(y) @@ -3540,12 +3832,13 @@ func rewriteValueWasm_OpOrB_0(v *Value) bool { } } func rewriteValueWasm_OpPopCount16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount16 x) // result: (I64Popcnt (ZeroExt16to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Popcnt) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -3554,12 +3847,13 @@ func rewriteValueWasm_OpPopCount16_0(v *Value) bool { } } func rewriteValueWasm_OpPopCount32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount32 x) // result: (I64Popcnt (ZeroExt32to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Popcnt) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -3568,22 +3862,24 @@ func rewriteValueWasm_OpPopCount32_0(v *Value) bool { } } func rewriteValueWasm_OpPopCount64_0(v *Value) bool { + v_0 := v.Args[0] // match: (PopCount64 x) // result: (I64Popcnt x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Popcnt) v.AddArg(x) return true } } func rewriteValueWasm_OpPopCount8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (PopCount8 x) // result: (I64Popcnt (ZeroExt8to64 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64Popcnt) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -3592,15 +3888,15 @@ func rewriteValueWasm_OpPopCount8_0(v *Value) bool { } } func rewriteValueWasm_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft16 x (I64Const [c])) // result: (Or16 (Lsh16x64 x (I64Const [c&15])) (Rsh16Ux64 x (I64Const [-c&15]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const { break } @@ -3623,11 +3919,13 @@ func rewriteValueWasm_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValueWasm_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft32 x y) // result: (I32Rotl x y) for { 
- y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI32Rotl) v.AddArg(x) v.AddArg(y) @@ -3635,11 +3933,13 @@ func rewriteValueWasm_OpRotateLeft32_0(v *Value) bool { } } func rewriteValueWasm_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft64 x y) // result: (I64Rotl x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Rotl) v.AddArg(x) v.AddArg(y) @@ -3647,15 +3947,15 @@ func rewriteValueWasm_OpRotateLeft64_0(v *Value) bool { } } func rewriteValueWasm_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (RotateLeft8 x (I64Const [c])) // result: (Or8 (Lsh8x64 x (I64Const [c&7])) (Rsh8Ux64 x (I64Const [-c&7]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const { break } @@ -3678,10 +3978,11 @@ func rewriteValueWasm_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValueWasm_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -3689,10 +3990,11 @@ func rewriteValueWasm_OpRound32F_0(v *Value) bool { } } func rewriteValueWasm_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -3700,23 +4002,26 @@ func rewriteValueWasm_OpRound64F_0(v *Value) bool { } } func rewriteValueWasm_OpRoundToEven_0(v *Value) bool { + v_0 := v.Args[0] // match: (RoundToEven x) // result: (F64Nearest x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64Nearest) v.AddArg(x) return true } } func rewriteValueWasm_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) // result: (Rsh64Ux64 (ZeroExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -3728,13 +4033,15 @@ func rewriteValueWasm_OpRsh16Ux16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) // result: (Rsh64Ux64 (ZeroExt16to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -3746,13 +4053,15 @@ func rewriteValueWasm_OpRsh16Ux32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) // result: (Rsh64Ux64 (ZeroExt16to64 x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -3762,13 +4071,15 @@ func rewriteValueWasm_OpRsh16Ux64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) // result: (Rsh64Ux64 (ZeroExt16to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) @@ -3780,13 +4091,15 @@ func rewriteValueWasm_OpRsh16Ux8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16x16_0(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) // result: (Rsh64x64 (SignExt16to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -3798,13 +4111,15 @@ func rewriteValueWasm_OpRsh16x16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) // result: (Rsh64x64 (SignExt16to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -3816,13 +4131,15 @@ func rewriteValueWasm_OpRsh16x32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) // result: (Rsh64x64 (SignExt16to64 x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -3832,13 +4149,15 @@ func rewriteValueWasm_OpRsh16x64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) // result: (Rsh64x64 (SignExt16to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) @@ -3850,13 +4169,15 @@ func rewriteValueWasm_OpRsh16x8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // result: (Rsh64Ux64 (ZeroExt32to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -3868,13 +4189,15 @@ func rewriteValueWasm_OpRsh32Ux16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // result: (Rsh64Ux64 (ZeroExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -3886,13 +4209,15 @@ func rewriteValueWasm_OpRsh32Ux32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // result: (Rsh64Ux64 (ZeroExt32to64 x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -3902,13 +4227,15 @@ func rewriteValueWasm_OpRsh32Ux64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // result: (Rsh64Ux64 (ZeroExt32to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) @@ -3920,13 +4247,15 @@ func rewriteValueWasm_OpRsh32Ux8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // result: (Rsh64x64 (SignExt32to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -3938,13 +4267,15 @@ func rewriteValueWasm_OpRsh32x16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) // result: (Rsh64x64 (SignExt32to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -3956,13 +4287,15 @@ func rewriteValueWasm_OpRsh32x32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x y) // result: (Rsh64x64 (SignExt32to64 x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -3972,13 +4305,15 @@ func rewriteValueWasm_OpRsh32x64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // result: (Rsh64x64 (SignExt32to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) @@ -3990,13 +4325,15 @@ func rewriteValueWasm_OpRsh32x8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) // result: (Rsh64Ux64 x (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -4006,13 +4343,15 @@ func rewriteValueWasm_OpRsh64Ux16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) // result: (Rsh64Ux64 x (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -4022,14 +4361,16 @@ func rewriteValueWasm_OpRsh64Ux32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux64 x y) // cond: shiftIsBounded(v) // result: (I64ShrU x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -4042,9 +4383,7 @@ func rewriteValueWasm_OpRsh64Ux64_0(v *Value) bool { // cond: uint64(c) < 64 // result: (I64ShrU x (I64Const [c])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const { break } @@ -4063,8 +4402,6 @@ func rewriteValueWasm_OpRsh64Ux64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (I64Const [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -4079,8 +4416,8 @@ func rewriteValueWasm_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x y) // result: (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64]))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmSelect) v0 := 
b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) v0.AddArg(x) @@ -4099,13 +4436,15 @@ func rewriteValueWasm_OpRsh64Ux64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) // result: (Rsh64Ux64 x (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -4115,13 +4454,15 @@ func rewriteValueWasm_OpRsh64Ux8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) // result: (Rsh64x64 x (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -4131,13 +4472,15 @@ func rewriteValueWasm_OpRsh64x16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 x y) // result: (Rsh64x64 x (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -4147,14 +4490,16 @@ func rewriteValueWasm_OpRsh64x32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x64 x y) // cond: shiftIsBounded(v) // result: (I64ShrS x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 if !(shiftIsBounded(v)) { break } @@ -4167,9 +4512,7 @@ func rewriteValueWasm_OpRsh64x64_0(v *Value) bool { // cond: uint64(c) < 64 // result: (I64ShrS x (I64Const [c])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const { break } @@ -4188,9 +4531,7 @@ func rewriteValueWasm_OpRsh64x64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (I64ShrS x (I64Const [63])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const { break } @@ -4208,8 +4549,8 @@ func rewriteValueWasm_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x y) // result: (I64ShrS x (Select y (I64Const [63]) (I64LtU y (I64Const [64])))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64ShrS) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64) @@ -4228,13 +4569,15 @@ func rewriteValueWasm_OpRsh64x64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) // result: (Rsh64x64 x (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -4244,13 +4587,15 @@ func rewriteValueWasm_OpRsh64x8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) // result: (Rsh64Ux64 (ZeroExt8to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -4262,13 +4607,15 @@ func rewriteValueWasm_OpRsh8Ux16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) // result: (Rsh64Ux64 (ZeroExt8to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -4280,13 +4627,15 @@ func rewriteValueWasm_OpRsh8Ux32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) // result: (Rsh64Ux64 (ZeroExt8to64 x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -4296,13 +4645,15 @@ func rewriteValueWasm_OpRsh8Ux64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) // result: (Rsh64Ux64 (ZeroExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64Ux64) v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) @@ -4314,13 +4665,15 @@ func rewriteValueWasm_OpRsh8Ux8_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) // result: (Rsh64x64 (SignExt8to64 x) (ZeroExt16to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -4332,13 +4685,15 @@ func rewriteValueWasm_OpRsh8x16_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) // result: (Rsh64x64 (SignExt8to64 x) (ZeroExt32to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -4350,13 +4705,15 @@ func rewriteValueWasm_OpRsh8x32_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) // result: (Rsh64x64 (SignExt8to64 x) y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -4366,13 +4723,15 @@ func rewriteValueWasm_OpRsh8x64_0(v *Value) bool { } } func rewriteValueWasm_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) // result: (Rsh64x64 (SignExt8to64 x) (ZeroExt8to64 y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpRsh64x64) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) @@ -4384,12 +4743,13 @@ func rewriteValueWasm_OpRsh8x8_0(v *Value) bool { } } func rewriteValueWasm_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt16to32 x:(I64Load16S _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load16S { break } @@ -4403,7 +4763,7 @@ func rewriteValueWasm_OpSignExt16to32_0(v *Value) bool { // cond: objabi.GOWASM.SignExt // result: (I64Extend16S x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOWASM.SignExt) { break } @@ -4414,7 +4774,7 @@ func rewriteValueWasm_OpSignExt16to32_0(v *Value) bool { // match: (SignExt16to32 x) // result: (I64ShrS (I64Shl x 
(I64Const [48])) (I64Const [48])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -4429,12 +4789,13 @@ func rewriteValueWasm_OpSignExt16to32_0(v *Value) bool { } } func rewriteValueWasm_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt16to64 x:(I64Load16S _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load16S { break } @@ -4448,7 +4809,7 @@ func rewriteValueWasm_OpSignExt16to64_0(v *Value) bool { // cond: objabi.GOWASM.SignExt // result: (I64Extend16S x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOWASM.SignExt) { break } @@ -4459,7 +4820,7 @@ func rewriteValueWasm_OpSignExt16to64_0(v *Value) bool { // match: (SignExt16to64 x) // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -4474,12 +4835,13 @@ func rewriteValueWasm_OpSignExt16to64_0(v *Value) bool { } } func rewriteValueWasm_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt32to64 x:(I64Load32S _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load32S { break } @@ -4493,7 +4855,7 @@ func rewriteValueWasm_OpSignExt32to64_0(v *Value) bool { // cond: objabi.GOWASM.SignExt // result: (I64Extend32S x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOWASM.SignExt) { break } @@ -4504,7 +4866,7 @@ func rewriteValueWasm_OpSignExt32to64_0(v *Value) bool { // match: (SignExt32to64 x) // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -4519,12 +4881,13 @@ func rewriteValueWasm_OpSignExt32to64_0(v *Value) bool { } } func rewriteValueWasm_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt8to16 x:(I64Load8S _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load8S { break } @@ -4538,7 +4901,7 @@ func rewriteValueWasm_OpSignExt8to16_0(v *Value) bool { // cond: objabi.GOWASM.SignExt // result: (I64Extend8S x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOWASM.SignExt) { break } @@ -4549,7 +4912,7 @@ func rewriteValueWasm_OpSignExt8to16_0(v *Value) bool { // match: (SignExt8to16 x) // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -4564,12 +4927,13 @@ func rewriteValueWasm_OpSignExt8to16_0(v *Value) bool { } } func rewriteValueWasm_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt8to32 x:(I64Load8S _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load8S { break } @@ -4583,7 +4947,7 @@ func rewriteValueWasm_OpSignExt8to32_0(v *Value) bool { // cond: objabi.GOWASM.SignExt // result: (I64Extend8S x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOWASM.SignExt) { break } @@ -4594,7 +4958,7 @@ func rewriteValueWasm_OpSignExt8to32_0(v *Value) bool { // match: (SignExt8to32 x) // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -4609,12 +4973,13 @@ func rewriteValueWasm_OpSignExt8to32_0(v *Value) bool { } } 
func rewriteValueWasm_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt8to64 x:(I64Load8S _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load8S { break } @@ -4628,7 +4993,7 @@ func rewriteValueWasm_OpSignExt8to64_0(v *Value) bool { // cond: objabi.GOWASM.SignExt // result: (I64Extend8S x) for { - x := v.Args[0] + x := v_0 if !(objabi.GOWASM.SignExt) { break } @@ -4639,7 +5004,7 @@ func rewriteValueWasm_OpSignExt8to64_0(v *Value) bool { // match: (SignExt8to64 x) // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) v0.AddArg(x) @@ -4654,12 +5019,13 @@ func rewriteValueWasm_OpSignExt8to64_0(v *Value) bool { } } func rewriteValueWasm_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Slicemask x) // result: (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64ShrS) v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64) v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -4674,22 +5040,24 @@ func rewriteValueWasm_OpSlicemask_0(v *Value) bool { } } func rewriteValueWasm_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt x) // result: (F64Sqrt x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64Sqrt) v.AddArg(x) return true } } func rewriteValueWasm_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] // match: (StaticCall [argwid] {target} mem) // result: (LoweredStaticCall [argwid] {target} mem) for { argwid := v.AuxInt target := v.Aux - mem := v.Args[0] + mem := v_0 v.reset(OpWasmLoweredStaticCall) v.AuxInt = argwid v.Aux = target @@ -4698,14 +5066,17 @@ func rewriteValueWasm_OpStaticCall_0(v *Value) bool { } } func rewriteValueWasm_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: is64BitFloat(t.(*types.Type)) // result: (F64Store ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(is64BitFloat(t.(*types.Type))) { break } @@ -4720,9 +5091,9 @@ func rewriteValueWasm_OpStore_0(v *Value) bool { // result: (F32Store ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(is32BitFloat(t.(*types.Type))) { break } @@ -4737,9 +5108,9 @@ func rewriteValueWasm_OpStore_0(v *Value) bool { // result: (I64Store ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 8) { break } @@ -4754,9 +5125,9 @@ func rewriteValueWasm_OpStore_0(v *Value) bool { // result: (I64Store32 ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 4) { break } @@ -4771,9 +5142,9 @@ func rewriteValueWasm_OpStore_0(v *Value) bool { // result: (I64Store16 ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 2) { break } @@ -4788,9 +5159,9 @@ func rewriteValueWasm_OpStore_0(v *Value) bool { // result: (I64Store8 ptr val mem) for { t := v.Aux - mem := v.Args[2] - ptr := v.Args[0] - val := v.Args[1] + ptr := v_0 + val := v_1 + mem := v_2 if !(t.(*types.Type).Size() == 1) { break } @@ -4803,11 
+5174,13 @@ func rewriteValueWasm_OpStore_0(v *Value) bool { return false } func rewriteValueWasm_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub16 x y) // result: (I64Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Sub) v.AddArg(x) v.AddArg(y) @@ -4815,11 +5188,13 @@ func rewriteValueWasm_OpSub16_0(v *Value) bool { } } func rewriteValueWasm_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32 x y) // result: (I64Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Sub) v.AddArg(x) v.AddArg(y) @@ -4827,11 +5202,13 @@ func rewriteValueWasm_OpSub32_0(v *Value) bool { } } func rewriteValueWasm_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F x y) // result: (F32Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF32Sub) v.AddArg(x) v.AddArg(y) @@ -4839,11 +5216,13 @@ func rewriteValueWasm_OpSub32F_0(v *Value) bool { } } func rewriteValueWasm_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64 x y) // result: (I64Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Sub) v.AddArg(x) v.AddArg(y) @@ -4851,11 +5230,13 @@ func rewriteValueWasm_OpSub64_0(v *Value) bool { } } func rewriteValueWasm_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F x y) // result: (F64Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmF64Sub) v.AddArg(x) v.AddArg(y) @@ -4863,11 +5244,13 @@ func rewriteValueWasm_OpSub64F_0(v *Value) bool { } } func rewriteValueWasm_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub8 x y) // result: (I64Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Sub) v.AddArg(x) v.AddArg(y) @@ -4875,11 +5258,13 @@ func rewriteValueWasm_OpSub8_0(v *Value) bool { } } func rewriteValueWasm_OpSubPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (SubPtr x y) // result: (I64Sub x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Sub) v.AddArg(x) v.AddArg(y) @@ -4887,20 +5272,22 @@ func rewriteValueWasm_OpSubPtr_0(v *Value) bool { } } func rewriteValueWasm_OpTrunc_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc x) // result: (F64Trunc x) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmF64Trunc) v.AddArg(x) return true } } func rewriteValueWasm_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4908,10 +5295,11 @@ func rewriteValueWasm_OpTrunc16to8_0(v *Value) bool { } } func rewriteValueWasm_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4919,10 +5307,11 @@ func rewriteValueWasm_OpTrunc32to16_0(v *Value) bool { } } func rewriteValueWasm_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4930,10 +5319,11 @@ func rewriteValueWasm_OpTrunc32to8_0(v *Value) bool { } } func rewriteValueWasm_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4941,10 +5331,11 @@ func 
rewriteValueWasm_OpTrunc64to16_0(v *Value) bool { } } func rewriteValueWasm_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4952,10 +5343,11 @@ func rewriteValueWasm_OpTrunc64to32_0(v *Value) bool { } } func rewriteValueWasm_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 x) // result: x for { - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -4963,13 +5355,16 @@ func rewriteValueWasm_OpTrunc64to8_0(v *Value) bool { } } func rewriteValueWasm_OpWB_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (WB {fn} destptr srcptr mem) // result: (LoweredWB {fn} destptr srcptr mem) for { fn := v.Aux - mem := v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] + destptr := v_0 + srcptr := v_1 + mem := v_2 v.reset(OpWasmLoweredWB) v.Aux = fn v.AddArg(destptr) @@ -4979,18 +5374,17 @@ func rewriteValueWasm_OpWB_0(v *Value) bool { } } func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (F64Add (F64Const [x]) (F64Const [y])) // result: (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmF64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmF64Const { break } @@ -5002,12 +5396,11 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool { // match: (F64Add (F64Const [x]) y) // result: (F64Add y (F64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmF64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmF64Add) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) @@ -5018,18 +5411,17 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (F64Mul (F64Const [x]) (F64Const [y])) // result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmF64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmF64Const { break } @@ -5041,12 +5433,11 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool { // match: (F64Mul (F64Const [x]) y) // result: (F64Mul y (F64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmF64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmF64Mul) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) @@ -5057,18 +5448,17 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Add_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64Add (I64Const [x]) (I64Const [y])) // result: (I64Const [x + y]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5080,12 +5470,11 @@ func rewriteValueWasm_OpWasmI64Add_0(v *Value) bool { // match: (I64Add (I64Const [x]) y) // result: (I64Add y (I64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64Add) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5096,9 +5485,7 @@ func rewriteValueWasm_OpWasmI64Add_0(v *Value) bool { // match: (I64Add x (I64Const [y])) // result: 
(I64AddConst [y] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const { break } @@ -5111,13 +5498,14 @@ func rewriteValueWasm_OpWasmI64Add_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64AddConst_0(v *Value) bool { + v_0 := v.Args[0] // match: (I64AddConst [0] x) // result: x for { if v.AuxInt != 0 { break } - x := v.Args[0] + x := v_0 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -5128,7 +5516,6 @@ func rewriteValueWasm_OpWasmI64AddConst_0(v *Value) bool { // result: (LoweredAddr {sym} [off+off2] base) for { off := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpWasmLoweredAddr { break } @@ -5147,18 +5534,17 @@ func rewriteValueWasm_OpWasmI64AddConst_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64And_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64And (I64Const [x]) (I64Const [y])) // result: (I64Const [x & y]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5170,12 +5556,11 @@ func rewriteValueWasm_OpWasmI64And_0(v *Value) bool { // match: (I64And (I64Const [x]) y) // result: (I64And y (I64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64And) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5186,19 +5571,18 @@ func rewriteValueWasm_OpWasmI64And_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Eq_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64Eq (I64Const [x]) (I64Const [y])) // cond: x == y // result: (I64Const [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5214,13 +5598,10 @@ func rewriteValueWasm_OpWasmI64Eq_0(v *Value) bool { // cond: x != y // result: (I64Const [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5235,12 +5616,11 @@ func rewriteValueWasm_OpWasmI64Eq_0(v *Value) bool { // match: (I64Eq (I64Const [x]) y) // result: (I64Eq y (I64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64Eq) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5251,9 +5631,7 @@ func rewriteValueWasm_OpWasmI64Eq_0(v *Value) bool { // match: (I64Eq x (I64Const [0])) // result: (I64Eqz x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const || v_1.AuxInt != 0 { break } @@ -5264,10 +5642,10 @@ func rewriteValueWasm_OpWasmI64Eq_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Eqz_0(v *Value) bool { + v_0 := v.Args[0] // match: (I64Eqz (I64Eqz (I64Eqz x))) // result: (I64Eqz x) for { - v_0 := v.Args[0] if v_0.Op != OpWasmI64Eqz { break } @@ -5283,6 +5661,8 @@ func rewriteValueWasm_OpWasmI64Eqz_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (I64Load [off] (I64AddConst [off2] ptr) mem) @@ -5290,13 +5670,12 @@ func rewriteValueWasm_OpWasmI64Load_0(v *Value) bool { // result: (I64Load [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != 
OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU32Bit(off + off2)) { break } @@ -5311,8 +5690,6 @@ func rewriteValueWasm_OpWasmI64Load_0(v *Value) bool { // result: (I64Const [int64(read64(sym, off+off2, config.BigEndian))]) for { off := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmLoweredAddr { break } @@ -5329,18 +5706,19 @@ func rewriteValueWasm_OpWasmI64Load_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load16S_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Load16S [off] (I64AddConst [off2] ptr) mem) // cond: isU32Bit(off+off2) // result: (I64Load16S [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU32Bit(off + off2)) { break } @@ -5353,6 +5731,8 @@ func rewriteValueWasm_OpWasmI64Load16S_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (I64Load16U [off] (I64AddConst [off2] ptr) mem) @@ -5360,13 +5740,12 @@ func rewriteValueWasm_OpWasmI64Load16U_0(v *Value) bool { // result: (I64Load16U [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU32Bit(off + off2)) { break } @@ -5381,8 +5760,6 @@ func rewriteValueWasm_OpWasmI64Load16U_0(v *Value) bool { // result: (I64Const [int64(read16(sym, off+off2, config.BigEndian))]) for { off := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmLoweredAddr { break } @@ -5399,18 +5776,19 @@ func rewriteValueWasm_OpWasmI64Load16U_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load32S_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Load32S [off] (I64AddConst [off2] ptr) mem) // cond: isU32Bit(off+off2) // result: (I64Load32S [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU32Bit(off + off2)) { break } @@ -5423,6 +5801,8 @@ func rewriteValueWasm_OpWasmI64Load32S_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (I64Load32U [off] (I64AddConst [off2] ptr) mem) @@ -5430,13 +5810,12 @@ func rewriteValueWasm_OpWasmI64Load32U_0(v *Value) bool { // result: (I64Load32U [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU32Bit(off + off2)) { break } @@ -5451,8 +5830,6 @@ func rewriteValueWasm_OpWasmI64Load32U_0(v *Value) bool { // result: (I64Const [int64(read32(sym, off+off2, config.BigEndian))]) for { off := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmLoweredAddr { break } @@ -5469,18 +5846,19 @@ func rewriteValueWasm_OpWasmI64Load32U_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load8S_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Load8S [off] (I64AddConst [off2] ptr) mem) // cond: isU32Bit(off+off2) // result: (I64Load8S [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if 
!(isU32Bit(off + off2)) { break } @@ -5493,18 +5871,19 @@ func rewriteValueWasm_OpWasmI64Load8S_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Load8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Load8U [off] (I64AddConst [off2] ptr) mem) // cond: isU32Bit(off+off2) // result: (I64Load8U [off+off2] ptr mem) for { off := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] + mem := v_1 if !(isU32Bit(off + off2)) { break } @@ -5519,8 +5898,6 @@ func rewriteValueWasm_OpWasmI64Load8U_0(v *Value) bool { // result: (I64Const [int64(read8(sym, off+off2))]) for { off := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmLoweredAddr { break } @@ -5537,18 +5914,17 @@ func rewriteValueWasm_OpWasmI64Load8U_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Mul_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64Mul (I64Const [x]) (I64Const [y])) // result: (I64Const [x * y]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5560,12 +5936,11 @@ func rewriteValueWasm_OpWasmI64Mul_0(v *Value) bool { // match: (I64Mul (I64Const [x]) y) // result: (I64Mul y (I64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64Mul) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5576,19 +5951,18 @@ func rewriteValueWasm_OpWasmI64Mul_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Ne_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64Ne (I64Const [x]) (I64Const [y])) // cond: x == y // result: (I64Const [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5604,13 +5978,10 @@ func rewriteValueWasm_OpWasmI64Ne_0(v *Value) bool { // cond: x != y // result: (I64Const [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5625,12 +5996,11 @@ func rewriteValueWasm_OpWasmI64Ne_0(v *Value) bool { // match: (I64Ne (I64Const [x]) y) // result: (I64Ne y (I64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64Ne) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5641,9 +6011,7 @@ func rewriteValueWasm_OpWasmI64Ne_0(v *Value) bool { // match: (I64Ne x (I64Const [0])) // result: (I64Eqz (I64Eqz x)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpWasmI64Const || v_1.AuxInt != 0 { break } @@ -5656,18 +6024,17 @@ func rewriteValueWasm_OpWasmI64Ne_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Or_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64Or (I64Const [x]) (I64Const [y])) // result: (I64Const [x | y]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5679,12 +6046,11 @@ func rewriteValueWasm_OpWasmI64Or_0(v *Value) bool { // match: (I64Or (I64Const [x]) y) // result: (I64Or y (I64Const [x])) for { - y := v.Args[1] - v_0 := 
v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64Or) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5695,16 +6061,15 @@ func rewriteValueWasm_OpWasmI64Or_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Shl_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Shl (I64Const [x]) (I64Const [y])) // result: (I64Const [x << uint64(y)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5716,16 +6081,15 @@ func rewriteValueWasm_OpWasmI64Shl_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64ShrS_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64ShrS (I64Const [x]) (I64Const [y])) // result: (I64Const [x >> uint64(y)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5737,16 +6101,15 @@ func rewriteValueWasm_OpWasmI64ShrS_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64ShrU_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64ShrU (I64Const [x]) (I64Const [y])) // result: (I64Const [int64(uint64(x) >> uint64(y))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5758,19 +6121,21 @@ func rewriteValueWasm_OpWasmI64ShrU_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Store_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Store [off] (I64AddConst [off2] ptr) val mem) // cond: isU32Bit(off+off2) // result: (I64Store [off+off2] ptr val mem) for { off := v.AuxInt - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(isU32Bit(off + off2)) { break } @@ -5784,19 +6149,21 @@ func rewriteValueWasm_OpWasmI64Store_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Store16_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Store16 [off] (I64AddConst [off2] ptr) val mem) // cond: isU32Bit(off+off2) // result: (I64Store16 [off+off2] ptr val mem) for { off := v.AuxInt - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(isU32Bit(off + off2)) { break } @@ -5810,19 +6177,21 @@ func rewriteValueWasm_OpWasmI64Store16_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Store32_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Store32 [off] (I64AddConst [off2] ptr) val mem) // cond: isU32Bit(off+off2) // result: (I64Store32 [off+off2] ptr val mem) for { off := v.AuxInt - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(isU32Bit(off + off2)) { break } @@ -5836,19 +6205,21 @@ func rewriteValueWasm_OpWasmI64Store32_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Store8_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (I64Store8 [off] (I64AddConst [off2] ptr) val mem) // cond: isU32Bit(off+off2) // result: (I64Store8 [off+off2] ptr val mem) for { off := v.AuxInt - mem := v.Args[2] - v_0 := 
v.Args[0] if v_0.Op != OpWasmI64AddConst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] + val := v_1 + mem := v_2 if !(isU32Bit(off + off2)) { break } @@ -5862,18 +6233,17 @@ func rewriteValueWasm_OpWasmI64Store8_0(v *Value) bool { return false } func rewriteValueWasm_OpWasmI64Xor_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (I64Xor (I64Const [x]) (I64Const [y])) // result: (I64Const [x ^ y]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpWasmI64Const { break } @@ -5885,12 +6255,11 @@ func rewriteValueWasm_OpWasmI64Xor_0(v *Value) bool { // match: (I64Xor (I64Const [x]) y) // result: (I64Xor y (I64Const [x])) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpWasmI64Const { break } x := v_0.AuxInt + y := v_1 v.reset(OpWasmI64Xor) v.AddArg(y) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5901,11 +6270,13 @@ func rewriteValueWasm_OpWasmI64Xor_0(v *Value) bool { return false } func rewriteValueWasm_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor16 x y) // result: (I64Xor x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Xor) v.AddArg(x) v.AddArg(y) @@ -5913,11 +6284,13 @@ func rewriteValueWasm_OpXor16_0(v *Value) bool { } } func rewriteValueWasm_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor32 x y) // result: (I64Xor x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Xor) v.AddArg(x) v.AddArg(y) @@ -5925,11 +6298,13 @@ func rewriteValueWasm_OpXor32_0(v *Value) bool { } } func rewriteValueWasm_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor64 x y) // result: (I64Xor x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Xor) v.AddArg(x) v.AddArg(y) @@ -5937,11 +6312,13 @@ func rewriteValueWasm_OpXor64_0(v *Value) bool { } } func rewriteValueWasm_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Xor8 x y) // result: (I64Xor x y) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpWasmI64Xor) v.AddArg(x) v.AddArg(y) @@ -5949,6 +6326,8 @@ func rewriteValueWasm_OpXor8_0(v *Value) bool { } } func rewriteValueWasm_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [0] _ mem) @@ -5957,7 +6336,7 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 0 { break } - mem := v.Args[1] + mem := v_1 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -5969,8 +6348,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 1 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store8) v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -5985,8 +6364,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 2 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store16) v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6001,8 +6380,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 4 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store32) v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6017,8 +6396,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 8 { break 
} - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store) v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6033,8 +6412,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 3 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store8) v.AuxInt = 2 v.AddArg(destptr) @@ -6056,8 +6435,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 5 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store8) v.AuxInt = 4 v.AddArg(destptr) @@ -6079,8 +6458,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 6 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store16) v.AuxInt = 4 v.AddArg(destptr) @@ -6102,8 +6481,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { if v.AuxInt != 7 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store32) v.AuxInt = 3 v.AddArg(destptr) @@ -6124,8 +6503,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (I64Store destptr (I64Const [0]) mem)) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s%8 != 0 && s > 8) { break } @@ -6147,6 +6526,8 @@ func rewriteValueWasm_OpZero_0(v *Value) bool { return false } func rewriteValueWasm_OpZero_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Zero [16] destptr mem) @@ -6155,8 +6536,8 @@ func rewriteValueWasm_OpZero_10(v *Value) bool { if v.AuxInt != 16 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 8 v.AddArg(destptr) @@ -6178,8 +6559,8 @@ func rewriteValueWasm_OpZero_10(v *Value) bool { if v.AuxInt != 24 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 16 v.AddArg(destptr) @@ -6208,8 +6589,8 @@ func rewriteValueWasm_OpZero_10(v *Value) bool { if v.AuxInt != 32 { break } - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 v.reset(OpWasmI64Store) v.AuxInt = 24 v.AddArg(destptr) @@ -6244,8 +6625,8 @@ func rewriteValueWasm_OpZero_10(v *Value) bool { // result: (LoweredZero [s/8] destptr mem) for { s := v.AuxInt - mem := v.Args[1] - destptr := v.Args[0] + destptr := v_0 + mem := v_1 if !(s%8 == 0 && s > 32) { break } @@ -6258,12 +6639,13 @@ func rewriteValueWasm_OpZero_10(v *Value) bool { return false } func rewriteValueWasm_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt16to32 x:(I64Load16U _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load16U { break } @@ -6276,7 +6658,7 @@ func rewriteValueWasm_OpZeroExt16to32_0(v *Value) bool { // match: (ZeroExt16to32 x) // result: (I64And x (I64Const [0xffff])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64And) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6286,12 +6668,13 @@ func rewriteValueWasm_OpZeroExt16to32_0(v *Value) bool { } } func rewriteValueWasm_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt16to64 x:(I64Load16U _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load16U { break } @@ -6304,7 +6687,7 @@ func rewriteValueWasm_OpZeroExt16to64_0(v *Value) bool { // match: 
(ZeroExt16to64 x) // result: (I64And x (I64Const [0xffff])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64And) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6314,12 +6697,13 @@ func rewriteValueWasm_OpZeroExt16to64_0(v *Value) bool { } } func rewriteValueWasm_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt32to64 x:(I64Load32U _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load32U { break } @@ -6332,7 +6716,7 @@ func rewriteValueWasm_OpZeroExt32to64_0(v *Value) bool { // match: (ZeroExt32to64 x) // result: (I64And x (I64Const [0xffffffff])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64And) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6342,12 +6726,13 @@ func rewriteValueWasm_OpZeroExt32to64_0(v *Value) bool { } } func rewriteValueWasm_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt8to16 x:(I64Load8U _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load8U { break } @@ -6360,7 +6745,7 @@ func rewriteValueWasm_OpZeroExt8to16_0(v *Value) bool { // match: (ZeroExt8to16 x) // result: (I64And x (I64Const [0xff])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64And) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6370,12 +6755,13 @@ func rewriteValueWasm_OpZeroExt8to16_0(v *Value) bool { } } func rewriteValueWasm_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt8to32 x:(I64Load8U _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load8U { break } @@ -6388,7 +6774,7 @@ func rewriteValueWasm_OpZeroExt8to32_0(v *Value) bool { // match: (ZeroExt8to32 x) // result: (I64And x (I64Const [0xff])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64And) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) @@ -6398,12 +6784,13 @@ func rewriteValueWasm_OpZeroExt8to32_0(v *Value) bool { } } func rewriteValueWasm_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt8to64 x:(I64Load8U _ _)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpWasmI64Load8U { break } @@ -6416,7 +6803,7 @@ func rewriteValueWasm_OpZeroExt8to64_0(v *Value) bool { // match: (ZeroExt8to64 x) // result: (I64And x (I64Const [0xff])) for { - x := v.Args[0] + x := v_0 v.reset(OpWasmI64And) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index d6624b1083..1d1dac7866 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -33,10 +33,10 @@ func rewriteValuedec(v *Value) bool { return false } func rewriteValuedec_OpComplexImag_0(v *Value) bool { + v_0 := v.Args[0] // match: (ComplexImag (ComplexMake _ imag )) // result: imag for { - v_0 := v.Args[0] if v_0.Op != OpComplexMake { break } @@ -49,10 +49,10 @@ func rewriteValuedec_OpComplexImag_0(v *Value) bool { return false } func rewriteValuedec_OpComplexReal_0(v *Value) bool { + v_0 := v.Args[0] // match: (ComplexReal (ComplexMake real _ )) // result: real for { - v_0 := v.Args[0] if v_0.Op != OpComplexMake { break } @@ -66,10 +66,10 @@ func rewriteValuedec_OpComplexReal_0(v *Value) bool { return false } func rewriteValuedec_OpIData_0(v *Value) bool { + v_0 := v.Args[0] // match: (IData (IMake _ data)) // result: 
data for { - v_0 := v.Args[0] if v_0.Op != OpIMake { break } @@ -82,10 +82,10 @@ func rewriteValuedec_OpIData_0(v *Value) bool { return false } func rewriteValuedec_OpITab_0(v *Value) bool { + v_0 := v.Args[0] // match: (ITab (IMake itab _)) // result: itab for { - v_0 := v.Args[0] if v_0.Op != OpIMake { break } @@ -99,6 +99,8 @@ func rewriteValuedec_OpITab_0(v *Value) bool { return false } func rewriteValuedec_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -107,8 +109,8 @@ func rewriteValuedec_OpLoad_0(v *Value) bool { // result: (ComplexMake (Load ptr mem) (Load (OffPtr [4] ptr) mem) ) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsComplex() && t.Size() == 8) { break } @@ -131,8 +133,8 @@ func rewriteValuedec_OpLoad_0(v *Value) bool { // result: (ComplexMake (Load ptr mem) (Load (OffPtr [8] ptr) mem) ) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsComplex() && t.Size() == 16) { break } @@ -155,8 +157,8 @@ func rewriteValuedec_OpLoad_0(v *Value) bool { // result: (StringMake (Load ptr mem) (Load (OffPtr [config.PtrSize] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsString()) { break } @@ -179,8 +181,8 @@ func rewriteValuedec_OpLoad_0(v *Value) bool { // result: (SliceMake (Load ptr mem) (Load (OffPtr [config.PtrSize] ptr) mem) (Load (OffPtr [2*config.PtrSize] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsSlice()) { break } @@ -210,8 +212,8 @@ func rewriteValuedec_OpLoad_0(v *Value) bool { // result: (IMake (Load ptr mem) (Load (OffPtr [config.PtrSize] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsInterface()) { break } @@ -232,10 +234,10 @@ func rewriteValuedec_OpLoad_0(v *Value) bool { return false } func rewriteValuedec_OpSliceCap_0(v *Value) bool { + v_0 := v.Args[0] // match: (SliceCap (SliceMake _ _ cap)) // result: cap for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -248,10 +250,10 @@ func rewriteValuedec_OpSliceCap_0(v *Value) bool { return false } func rewriteValuedec_OpSliceLen_0(v *Value) bool { + v_0 := v.Args[0] // match: (SliceLen (SliceMake _ len _)) // result: len for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -265,10 +267,10 @@ func rewriteValuedec_OpSliceLen_0(v *Value) bool { return false } func rewriteValuedec_OpSlicePtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (SlicePtr (SliceMake ptr _ _ )) // result: ptr for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -282,6 +284,9 @@ func rewriteValuedec_OpSlicePtr_0(v *Value) bool { return false } func rewriteValuedec_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -290,14 +295,13 @@ func rewriteValuedec_OpStore_0(v *Value) bool { // result: (Store {typ.Float32} (OffPtr [4] dst) imag (Store {typ.Float32} dst real mem)) for { t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpComplexMake { break } imag := v_1.Args[1] real := v_1.Args[0] + mem := v_2 if !(t.(*types.Type).Size() == 8) { break } @@ -321,14 +325,13 @@ func rewriteValuedec_OpStore_0(v *Value) bool { // result: (Store {typ.Float64} (OffPtr [8] dst) imag (Store {typ.Float64} dst real mem)) for { t := v.Aux - mem := v.Args[2] - dst := 
v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpComplexMake { break } imag := v_1.Args[1] real := v_1.Args[0] + mem := v_2 if !(t.(*types.Type).Size() == 16) { break } @@ -350,14 +353,13 @@ func rewriteValuedec_OpStore_0(v *Value) bool { // match: (Store dst (StringMake ptr len) mem) // result: (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem)) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpStringMake { break } len := v_1.Args[1] ptr := v_1.Args[0] + mem := v_2 v.reset(OpStore) v.Aux = typ.Int v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) @@ -376,15 +378,14 @@ func rewriteValuedec_OpStore_0(v *Value) bool { // match: (Store dst (SliceMake ptr len cap) mem) // result: (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpSliceMake { break } cap := v_1.Args[2] ptr := v_1.Args[0] len := v_1.Args[1] + mem := v_2 v.reset(OpStore) v.Aux = typ.Int v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) @@ -411,14 +412,13 @@ func rewriteValuedec_OpStore_0(v *Value) bool { // match: (Store dst (IMake itab data) mem) // result: (Store {typ.BytePtr} (OffPtr [config.PtrSize] dst) data (Store {typ.Uintptr} dst itab mem)) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpIMake { break } data := v_1.Args[1] itab := v_1.Args[0] + mem := v_2 v.reset(OpStore) v.Aux = typ.BytePtr v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) @@ -437,10 +437,10 @@ func rewriteValuedec_OpStore_0(v *Value) bool { return false } func rewriteValuedec_OpStringLen_0(v *Value) bool { + v_0 := v.Args[0] // match: (StringLen (StringMake _ len)) // result: len for { - v_0 := v.Args[0] if v_0.Op != OpStringMake { break } @@ -453,10 +453,10 @@ func rewriteValuedec_OpStringLen_0(v *Value) bool { return false } func rewriteValuedec_OpStringPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (StringPtr (StringMake ptr _)) // result: ptr for { - v_0 := v.Args[0] if v_0.Op != OpStringMake { break } diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 34645ce00b..28b3ab745b 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -127,13 +127,15 @@ func rewriteValuedec64(v *Value) bool { return false } func rewriteValuedec64_OpAdd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Add64 x y) // result: (Int64Make (Add32withcarry (Int64Hi x) (Int64Hi y) (Select1 (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 (Add32carry (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -167,13 +169,15 @@ func rewriteValuedec64_OpAdd64_0(v *Value) bool { } } func rewriteValuedec64_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (And64 x y) // result: (Int64Make (And32 (Int64Hi x) (Int64Hi y)) (And32 (Int64Lo x) (Int64Lo y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -281,12 +285,13 @@ func rewriteValuedec64_OpArg_0(v *Value) bool 
{ return false } func rewriteValuedec64_OpBitLen64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (BitLen64 x) // result: (Add32 (BitLen32 (Int64Hi x)) (BitLen32 (Or32 (Int64Lo x) (Zeromask (Int64Hi x))))) for { - x := v.Args[0] + x := v_0 v.reset(OpAdd32) v.Type = typ.Int v0 := b.NewValue0(v.Pos, OpBitLen32, typ.Int) @@ -310,12 +315,13 @@ func rewriteValuedec64_OpBitLen64_0(v *Value) bool { } } func rewriteValuedec64_OpBswap64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Bswap64 x) // result: (Int64Make (Bswap32 (Int64Lo x)) (Bswap32 (Int64Hi x))) for { - x := v.Args[0] + x := v_0 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) @@ -331,12 +337,13 @@ func rewriteValuedec64_OpBswap64_0(v *Value) bool { } } func rewriteValuedec64_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Com64 x) // result: (Int64Make (Com32 (Int64Hi x)) (Com32 (Int64Lo x))) for { - x := v.Args[0] + x := v_0 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -393,12 +400,13 @@ func rewriteValuedec64_OpConst64_0(v *Value) bool { return false } func rewriteValuedec64_OpCtz64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Ctz64 x) // result: (Add32 (Ctz32 (Int64Lo x)) (And32 (Com32 (Zeromask (Int64Lo x))) (Ctz32 (Int64Hi x)))) for { - x := v.Args[0] + x := v_0 v.reset(OpAdd32) v.Type = typ.UInt32 v0 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32) @@ -424,23 +432,26 @@ func rewriteValuedec64_OpCtz64_0(v *Value) bool { } } func rewriteValuedec64_OpCtz64NonZero_0(v *Value) bool { + v_0 := v.Args[0] // match: (Ctz64NonZero x) // result: (Ctz64 x) for { - x := v.Args[0] + x := v_0 v.reset(OpCtz64) v.AddArg(x) return true } } func rewriteValuedec64_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq64 x y) // result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpAndB) v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -462,13 +473,15 @@ func rewriteValuedec64_OpEq64_0(v *Value) bool { } } func rewriteValuedec64_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64 x y) // result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpGreater32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -500,13 +513,15 @@ func rewriteValuedec64_OpGeq64_0(v *Value) bool { } } func rewriteValuedec64_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Geq64U x y) // result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpGreater32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -538,13 +553,15 @@ func rewriteValuedec64_OpGeq64U_0(v *Value) bool { } } func rewriteValuedec64_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := 
v.Block typ := &b.Func.Config.Types // match: (Greater64 x y) // result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpGreater32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -576,13 +593,15 @@ func rewriteValuedec64_OpGreater64_0(v *Value) bool { } } func rewriteValuedec64_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Greater64U x y) // result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpGreater32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -614,10 +633,10 @@ func rewriteValuedec64_OpGreater64U_0(v *Value) bool { } } func rewriteValuedec64_OpInt64Hi_0(v *Value) bool { + v_0 := v.Args[0] // match: (Int64Hi (Int64Make hi _)) // result: hi for { - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } @@ -631,10 +650,10 @@ func rewriteValuedec64_OpInt64Hi_0(v *Value) bool { return false } func rewriteValuedec64_OpInt64Lo_0(v *Value) bool { + v_0 := v.Args[0] // match: (Int64Lo (Int64Make _ lo)) // result: lo for { - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } @@ -647,13 +666,15 @@ func rewriteValuedec64_OpInt64Lo_0(v *Value) bool { return false } func rewriteValuedec64_OpLeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64 x y) // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -685,13 +706,15 @@ func rewriteValuedec64_OpLeq64_0(v *Value) bool { } } func rewriteValuedec64_OpLeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Leq64U x y) // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -723,13 +746,15 @@ func rewriteValuedec64_OpLeq64U_0(v *Value) bool { } } func rewriteValuedec64_OpLess64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less64 x y) // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -761,13 +786,15 @@ func rewriteValuedec64_OpLess64_0(v *Value) bool { } } func rewriteValuedec64_OpLess64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Less64U x y) // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -799,6 
+826,8 @@ func rewriteValuedec64_OpLess64U_0(v *Value) bool { } } func rewriteValuedec64_OpLoad_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -807,8 +836,8 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool { // result: (Int64Make (Load (OffPtr [4] ptr) mem) (Load ptr mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) { break } @@ -831,8 +860,8 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool { // result: (Int64Make (Load (OffPtr [4] ptr) mem) (Load ptr mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) { break } @@ -855,8 +884,8 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool { // result: (Int64Make (Load ptr mem) (Load (OffPtr [4] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) { break } @@ -879,8 +908,8 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool { // result: (Int64Make (Load ptr mem) (Load (OffPtr [4] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) { break } @@ -901,14 +930,14 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -928,9 +957,7 @@ func rewriteValuedec64_OpLsh16x64_0(v *Value) bool { // match: (Lsh16x64 x (Int64Make (Const32 [0]) lo)) // result: (Lsh16x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -948,9 +975,7 @@ func rewriteValuedec64_OpLsh16x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Lsh16x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -972,14 +997,14 @@ func rewriteValuedec64_OpLsh16x64_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -999,9 +1024,7 @@ func rewriteValuedec64_OpLsh32x64_0(v *Value) bool { // match: (Lsh32x64 x (Int64Make (Const32 [0]) lo)) // result: (Lsh32x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1019,9 +1042,7 @@ func rewriteValuedec64_OpLsh32x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Lsh32x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1043,18 +1064,19 @@ func rewriteValuedec64_OpLsh32x64_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 (Int64Make hi lo) s) // result: (Int64Make (Or32 (Or32 (Lsh32x16 hi s) (Rsh32Ux16 lo (Sub16 (Const16 [32]) s))) (Lsh32x16 lo (Sub16 s (Const16 [32])))) 
(Lsh32x16 lo s)) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) @@ -1091,18 +1113,19 @@ func rewriteValuedec64_OpLsh64x16_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 (Int64Make hi lo) s) // result: (Int64Make (Or32 (Or32 (Lsh32x32 hi s) (Rsh32Ux32 lo (Sub32 (Const32 [32]) s))) (Lsh32x32 lo (Sub32 s (Const32 [32])))) (Lsh32x32 lo s)) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) @@ -1139,14 +1162,14 @@ func rewriteValuedec64_OpLsh64x32_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const64 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -1166,9 +1189,7 @@ func rewriteValuedec64_OpLsh64x64_0(v *Value) bool { // match: (Lsh64x64 x (Int64Make (Const32 [0]) lo)) // result: (Lsh64x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1186,9 +1207,7 @@ func rewriteValuedec64_OpLsh64x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Lsh64x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1210,18 +1229,19 @@ func rewriteValuedec64_OpLsh64x64_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 (Int64Make hi lo) s) // result: (Int64Make (Or32 (Or32 (Lsh32x8 hi s) (Rsh32Ux8 lo (Sub8 (Const8 [32]) s))) (Lsh32x8 lo (Sub8 s (Const8 [32])))) (Lsh32x8 lo s)) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) @@ -1258,14 +1278,14 @@ func rewriteValuedec64_OpLsh64x8_0(v *Value) bool { return false } func rewriteValuedec64_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -1285,9 +1305,7 @@ func rewriteValuedec64_OpLsh8x64_0(v *Value) bool { // match: (Lsh8x64 x (Int64Make (Const32 [0]) lo)) // result: (Lsh8x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1305,9 +1323,7 @@ func rewriteValuedec64_OpLsh8x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Lsh8x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1329,13 +1345,15 @@ func rewriteValuedec64_OpLsh8x64_0(v *Value) bool { return false } func rewriteValuedec64_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types // match: (Mul64 x y) // result: (Int64Make (Add32 (Mul32 (Int64Lo x) (Int64Hi y)) (Add32 (Mul32 (Int64Hi x) (Int64Lo y)) (Select0 (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 (Mul32uhilo (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) @@ -1381,12 +1399,13 @@ func rewriteValuedec64_OpMul64_0(v *Value) bool { } } func rewriteValuedec64_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Neg64 x) // result: (Sub64 (Const64 [0]) x) for { t := v.Type - x := v.Args[0] + x := v_0 v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = 0 @@ -1396,13 +1415,15 @@ func rewriteValuedec64_OpNeg64_0(v *Value) bool { } } func rewriteValuedec64_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq64 x y) // result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpOrB) v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -1424,13 +1445,15 @@ func rewriteValuedec64_OpNeq64_0(v *Value) bool { } } func rewriteValuedec64_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Or64 x y) // result: (Int64Make (Or32 (Int64Hi x) (Int64Hi y)) (Or32 (Int64Lo x) (Int64Lo y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -1452,14 +1475,14 @@ func rewriteValuedec64_OpOr64_0(v *Value) bool { } } func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -1479,9 +1502,7 @@ func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool { // match: (Rsh16Ux64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh16Ux32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1499,9 +1520,7 @@ func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh16Ux32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1523,15 +1542,15 @@ func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Signmask (SignExt16to32 x)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1553,9 +1572,7 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool { // match: (Rsh16x64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh16x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1573,9 +1590,7 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh16x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { 
break } @@ -1597,14 +1612,14 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -1624,9 +1639,7 @@ func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh32Ux32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1644,9 +1657,7 @@ func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh32Ux32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1668,15 +1679,15 @@ func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Signmask x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1696,9 +1707,7 @@ func rewriteValuedec64_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh32x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1716,9 +1725,7 @@ func rewriteValuedec64_OpRsh32x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh32x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1740,18 +1747,19 @@ func rewriteValuedec64_OpRsh32x64_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 (Int64Make hi lo) s) // result: (Int64Make (Rsh32Ux16 hi s) (Or32 (Or32 (Rsh32Ux16 lo s) (Lsh32x16 hi (Sub16 (Const16 [32]) s))) (Rsh32Ux16 hi (Sub16 s (Const16 [32]))))) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) v0.AddArg(hi) @@ -1788,18 +1796,19 @@ func rewriteValuedec64_OpRsh64Ux16_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 (Int64Make hi lo) s) // result: (Int64Make (Rsh32Ux32 hi s) (Or32 (Or32 (Rsh32Ux32 lo s) (Lsh32x32 hi (Sub32 (Const32 [32]) s))) (Rsh32Ux32 hi (Sub32 s (Const32 [32]))))) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) v0.AddArg(hi) @@ -1836,14 +1845,14 @@ func rewriteValuedec64_OpRsh64Ux32_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const64 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -1863,9 +1872,7 @@ func 
rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh64Ux32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1883,9 +1890,7 @@ func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh64Ux32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -1907,18 +1912,19 @@ func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 (Int64Make hi lo) s) // result: (Int64Make (Rsh32Ux8 hi s) (Or32 (Or32 (Rsh32Ux8 lo s) (Lsh32x8 hi (Sub8 (Const8 [32]) s))) (Rsh32Ux8 hi (Sub8 s (Const8 [32]))))) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) v0.AddArg(hi) @@ -1955,18 +1961,19 @@ func rewriteValuedec64_OpRsh64Ux8_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 (Int64Make hi lo) s) // result: (Int64Make (Rsh32x16 hi s) (Or32 (Or32 (Rsh32Ux16 lo s) (Lsh32x16 hi (Sub16 (Const16 [32]) s))) (And32 (Rsh32x16 hi (Sub16 s (Const16 [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 s (Const32 [5]))))))) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) v0.AddArg(hi) @@ -2015,18 +2022,19 @@ func rewriteValuedec64_OpRsh64x16_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 (Int64Make hi lo) s) // result: (Int64Make (Rsh32x32 hi s) (Or32 (Or32 (Rsh32Ux32 lo s) (Lsh32x32 hi (Sub32 (Const32 [32]) s))) (And32 (Rsh32x32 hi (Sub32 s (Const32 [32]))) (Zeromask (Rsh32Ux32 s (Const32 [5])))))) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) v0.AddArg(hi) @@ -2073,15 +2081,15 @@ func rewriteValuedec64_OpRsh64x32_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x64 x (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2110,9 +2118,7 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh64x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2130,9 +2136,7 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh64x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2154,18 +2158,19 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool { return false } func 
rewriteValuedec64_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 (Int64Make hi lo) s) // result: (Int64Make (Rsh32x8 hi s) (Or32 (Or32 (Rsh32Ux8 lo s) (Lsh32x8 hi (Sub8 (Const8 [32]) s))) (And32 (Rsh32x8 hi (Sub8 s (Const8 [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 s (Const32 [5]))))))) for { - s := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } lo := v_0.Args[1] hi := v_0.Args[0] + s := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) v0.AddArg(hi) @@ -2214,14 +2219,14 @@ func rewriteValuedec64_OpRsh64x8_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpInt64Make { break } @@ -2241,9 +2246,7 @@ func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool { // match: (Rsh8Ux64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh8Ux32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2261,9 +2264,7 @@ func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh8Ux32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2285,15 +2286,15 @@ func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool { return false } func rewriteValuedec64_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x (Int64Make (Const32 [c]) _)) // cond: c != 0 // result: (Signmask (SignExt8to32 x)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2315,9 +2316,7 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool { // match: (Rsh8x64 x (Int64Make (Const32 [0]) lo)) // result: (Rsh8x32 x lo) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2335,9 +2334,7 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool { // cond: hi.Op != OpConst32 // result: (Rsh8x32 x (Or32 (Zeromask hi) lo)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpInt64Make { break } @@ -2359,12 +2356,13 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool { return false } func rewriteValuedec64_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt16to64 x) // result: (SignExt32to64 (SignExt16to32 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpSignExt32to64) v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) @@ -2373,12 +2371,13 @@ func rewriteValuedec64_OpSignExt16to64_0(v *Value) bool { } } func rewriteValuedec64_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt32to64 x) // result: (Int64Make (Signmask x) x) for { - x := v.Args[0] + x := v_0 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) v0.AddArg(x) @@ -2388,12 +2387,13 @@ func rewriteValuedec64_OpSignExt32to64_0(v *Value) bool { } } func rewriteValuedec64_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (SignExt8to64 x) // result: (SignExt32to64 (SignExt8to32 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpSignExt32to64) v0 := 
b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) @@ -2402,6 +2402,9 @@ func rewriteValuedec64_OpSignExt8to64_0(v *Value) bool { } } func rewriteValuedec64_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (Store {t} dst (Int64Make hi lo) mem) @@ -2409,14 +2412,13 @@ func rewriteValuedec64_OpStore_0(v *Value) bool { // result: (Store {hi.Type} (OffPtr [4] dst) hi (Store {lo.Type} dst lo mem)) for { t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpInt64Make { break } lo := v_1.Args[1] hi := v_1.Args[0] + mem := v_2 if !(t.(*types.Type).Size() == 8 && !config.BigEndian) { break } @@ -2440,14 +2442,13 @@ func rewriteValuedec64_OpStore_0(v *Value) bool { // result: (Store {lo.Type} (OffPtr [4] dst) lo (Store {hi.Type} dst hi mem)) for { t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpInt64Make { break } lo := v_1.Args[1] hi := v_1.Args[0] + mem := v_2 if !(t.(*types.Type).Size() == 8 && config.BigEndian) { break } @@ -2469,13 +2470,15 @@ func rewriteValuedec64_OpStore_0(v *Value) bool { return false } func rewriteValuedec64_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Sub64 x y) // result: (Int64Make (Sub32withcarry (Int64Hi x) (Int64Hi y) (Select1 (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 (Sub32carry (Int64Lo x) (Int64Lo y)))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -2509,10 +2512,10 @@ func rewriteValuedec64_OpSub64_0(v *Value) bool { } } func rewriteValuedec64_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 (Int64Make _ lo)) // result: (Trunc32to16 lo) for { - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } @@ -2524,10 +2527,10 @@ func rewriteValuedec64_OpTrunc64to16_0(v *Value) bool { return false } func rewriteValuedec64_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 (Int64Make _ lo)) // result: lo for { - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } @@ -2540,10 +2543,10 @@ func rewriteValuedec64_OpTrunc64to32_0(v *Value) bool { return false } func rewriteValuedec64_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 (Int64Make _ lo)) // result: (Trunc32to8 lo) for { - v_0 := v.Args[0] if v_0.Op != OpInt64Make { break } @@ -2555,13 +2558,15 @@ func rewriteValuedec64_OpTrunc64to8_0(v *Value) bool { return false } func rewriteValuedec64_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Xor64 x y) // result: (Int64Make (Xor32 (Int64Hi x) (Int64Hi y)) (Xor32 (Int64Lo x) (Int64Lo y))) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) @@ -2583,12 +2588,13 @@ func rewriteValuedec64_OpXor64_0(v *Value) bool { } } func rewriteValuedec64_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt16to64 x) // result: (ZeroExt32to64 (ZeroExt16to32 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpZeroExt32to64) v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(x) @@ -2597,12 +2603,13 @@ func rewriteValuedec64_OpZeroExt16to64_0(v *Value) bool { } } func 
rewriteValuedec64_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt32to64 x) // result: (Int64Make (Const32 [0]) x) for { - x := v.Args[0] + x := v_0 v.reset(OpInt64Make) v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) v0.AuxInt = 0 @@ -2612,12 +2619,13 @@ func rewriteValuedec64_OpZeroExt32to64_0(v *Value) bool { } } func rewriteValuedec64_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ZeroExt8to64 x) // result: (ZeroExt32to64 (ZeroExt8to32 x)) for { - x := v.Args[0] + x := v_0 v.reset(OpZeroExt32to64) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index ed7c47fb61..ea0d92c81b 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -474,18 +474,17 @@ func rewriteValuegeneric(v *Value) bool { return false } func rewriteValuegeneric_OpAdd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Add16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c+d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -500,26 +499,27 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // result: (Mul16 x (Add16 y z)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMul16 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - y := v_0.Args[1^_i1] - v_1 := v.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - if x != v_1.Args[_i2] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i2] + z := v_1_1 v.reset(OpMul16) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd16, t) @@ -535,13 +535,11 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // match: (Add16 (Const16 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -552,14 +550,8 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // match: (Add16 (Const16 [1]) (Com16 x)) // result: (Neg16 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] - if v_0.Op != OpConst16 || v_0.AuxInt != 1 { - continue - } - v_1 := v.Args[1^_i0] - if v_1.Op != OpCom16 { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || v_0.AuxInt != 1 || v_1.Op != OpCom16 { continue } x := v_1.Args[0] @@ -573,21 +565,21 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Add16 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAdd16 { 
continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst16 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst16 && x.Op != OpConst16) { continue } @@ -606,9 +598,7 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Sub16 x z)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub16 { continue } @@ -618,7 +608,7 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst16 && x.Op != OpConst16) { continue } @@ -636,9 +626,7 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Sub16 (Add16 x z) i) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub16 { continue } @@ -649,7 +637,7 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst16 && x.Op != OpConst16) { continue } @@ -666,26 +654,24 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // match: (Add16 (Const16 [c]) (Add16 (Const16 [d]) x)) // result: (Add16 (Const16 [int64(int16(c+d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) @@ -699,15 +685,12 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // match: (Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) // result: (Sub16 (Const16 [int64(int16(c+d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub16 { continue } @@ -729,15 +712,12 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { // match: (Add16 (Const16 [c]) (Sub16 x (Const16 [d]))) // result: (Add16 (Const16 [int64(int16(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub16 { continue } @@ -760,18 +740,17 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { return false } func rewriteValuegeneric_OpAdd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Add32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c+d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { 
continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -786,26 +765,27 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // result: (Mul32 x (Add32 y z)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMul32 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - y := v_0.Args[1^_i1] - v_1 := v.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - if x != v_1.Args[_i2] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i2] + z := v_1_1 v.reset(OpMul32) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd32, t) @@ -821,13 +801,11 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // match: (Add32 (Const32 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -838,14 +816,8 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // match: (Add32 (Const32 [1]) (Com32 x)) // result: (Neg32 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] - if v_0.Op != OpConst32 || v_0.AuxInt != 1 { - continue - } - v_1 := v.Args[1^_i0] - if v_1.Op != OpCom32 { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || v_0.AuxInt != 1 || v_1.Op != OpCom32 { continue } x := v_1.Args[0] @@ -859,21 +831,21 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Add32 i (Add32 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAdd32 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst32 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst32 && x.Op != OpConst32) { continue } @@ -892,9 +864,7 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Add32 i (Sub32 x z)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub32 { continue } @@ -904,7 +874,7 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst32 && x.Op != OpConst32) { continue } @@ -922,9 +892,7 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Sub32 (Add32 x z) i) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub32 { continue } @@ -935,7 +903,7 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst32 && x.Op != 
OpConst32) { continue } @@ -952,26 +920,24 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // match: (Add32 (Const32 [c]) (Add32 (Const32 [d]) x)) // result: (Add32 (Const32 [int64(int32(c+d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) @@ -985,15 +951,12 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // match: (Add32 (Const32 [c]) (Sub32 (Const32 [d]) x)) // result: (Sub32 (Const32 [int64(int32(c+d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub32 { continue } @@ -1015,15 +978,12 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { // match: (Add32 (Const32 [c]) (Sub32 x (Const32 [d]))) // result: (Add32 (Const32 [int64(int32(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub32 { continue } @@ -1046,17 +1006,16 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { return false } func rewriteValuegeneric_OpAdd32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add32F (Const32F [c]) (Const32F [d])) // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32F { continue } @@ -1070,18 +1029,17 @@ func rewriteValuegeneric_OpAdd32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpAdd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Add64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c+d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -1096,26 +1054,27 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // result: (Mul64 x (Add64 y z)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMul64 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - y := v_0.Args[1^_i1] - v_1 := v.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - if x != v_1.Args[_i2] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, 
v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i2] + z := v_1_1 v.reset(OpMul64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd64, t) @@ -1131,13 +1090,11 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // match: (Add64 (Const64 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -1148,14 +1105,8 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // match: (Add64 (Const64 [1]) (Com64 x)) // result: (Neg64 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] - if v_0.Op != OpConst64 || v_0.AuxInt != 1 { - continue - } - v_1 := v.Args[1^_i0] - if v_1.Op != OpCom64 { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || v_0.AuxInt != 1 || v_1.Op != OpCom64 { continue } x := v_1.Args[0] @@ -1169,21 +1120,21 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Add64 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAdd64 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst64 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } @@ -1202,9 +1153,7 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Sub64 x z)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub64 { continue } @@ -1214,7 +1163,7 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } @@ -1232,9 +1181,7 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Sub64 (Add64 x z) i) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub64 { continue } @@ -1245,7 +1192,7 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } @@ -1262,26 +1209,24 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // match: (Add64 (Const64 [c]) (Add64 (Const64 [d]) x)) // result: (Add64 (Const64 [c+d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d @@ 
-1295,15 +1240,12 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // match: (Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) // result: (Sub64 (Const64 [c+d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub64 { continue } @@ -1325,15 +1267,12 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { // match: (Add64 (Const64 [c]) (Sub64 x (Const64 [d]))) // result: (Add64 (Const64 [c-d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub64 { continue } @@ -1356,17 +1295,16 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { return false } func rewriteValuegeneric_OpAdd64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Add64F (Const64F [c]) (Const64F [d])) // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64F { continue } @@ -1380,18 +1318,17 @@ func rewriteValuegeneric_OpAdd64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpAdd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Add8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c+d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -1406,26 +1343,27 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // result: (Mul8 x (Add8 y z)) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMul8 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - y := v_0.Args[1^_i1] - v_1 := v.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul8 { continue } _ = v_1.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - if x != v_1.Args[_i2] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i2] + z := v_1_1 v.reset(OpMul8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAdd8, t) @@ -1441,13 +1379,11 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // match: (Add8 (Const8 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -1458,14 +1394,8 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // match: (Add8 (Const8 [1]) (Com8 x)) // result: (Neg8 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] - if v_0.Op != OpConst8 || v_0.AuxInt != 1 { - continue - } - v_1 := v.Args[1^_i0] - if v_1.Op != OpCom8 { + for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || v_0.AuxInt != 1 || v_1.Op != OpCom8 { continue } x := v_1.Args[0] @@ -1479,21 +1409,21 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Add8 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAdd8 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst8 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst8 && x.Op != OpConst8) { continue } @@ -1512,9 +1442,7 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Sub8 x z)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub8 { continue } @@ -1524,7 +1452,7 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst8 && x.Op != OpConst8) { continue } @@ -1542,9 +1470,7 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Sub8 (Add8 x z) i) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpSub8 { continue } @@ -1555,7 +1481,7 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { continue } t := i.Type - x := v.Args[1^_i0] + x := v_1 if !(z.Op != OpConst8 && x.Op != OpConst8) { continue } @@ -1572,26 +1498,24 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // match: (Add8 (Const8 [c]) (Add8 (Const8 [d]) x)) // result: (Add8 (Const8 [int64(int8(c+d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) @@ -1605,15 +1529,12 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // match: (Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // result: (Sub8 (Const8 [int64(int8(c+d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub8 { continue } @@ -1635,15 +1556,12 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { // match: (Add8 (Const8 [c]) (Sub8 x (Const8 [d]))) // result: (Add8 (Const8 [int64(int8(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpSub8 { continue } @@ -1666,13 +1584,13 @@ func 
rewriteValuegeneric_OpAdd8_0(v *Value) bool { return false } func rewriteValuegeneric_OpAddPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (AddPtr x (Const64 [c])) // result: (OffPtr x [c]) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -1687,9 +1605,7 @@ func rewriteValuegeneric_OpAddPtr_0(v *Value) bool { // result: (OffPtr x [c]) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -1703,18 +1619,17 @@ func rewriteValuegeneric_OpAddPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpAnd16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (And16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c&d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -1729,14 +1644,11 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // cond: c >= 64-ntz(m) // result: (Const16 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpRsh16Ux64 { continue } @@ -1759,14 +1671,11 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // cond: c >= 64-nlz(m) // result: (Const16 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpLsh16x64 { continue } @@ -1788,8 +1697,8 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // match: (And16 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -1800,13 +1709,11 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // match: (And16 (Const16 [-1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -1817,9 +1724,7 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // match: (And16 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != 0 { continue } @@ -1832,19 +1737,19 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // match: (And16 x (And16 x y)) // result: (And16 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAnd16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpAnd16) v.AddArg(x) v.AddArg(y) @@ -1857,21 +1762,21 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (And16 i (And16 z x)) for { - _ = v.Args[1] 
- for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd16 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst16 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst16 && x.Op != OpConst16) { continue } @@ -1889,26 +1794,24 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { // match: (And16 (Const16 [c]) (And16 (Const16 [d]) x)) // result: (And16 (Const16 [int64(int16(c&d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAnd16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAnd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c & d)) @@ -1922,18 +1825,17 @@ func rewriteValuegeneric_OpAnd16_0(v *Value) bool { return false } func rewriteValuegeneric_OpAnd32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (And32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c&d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -1948,14 +1850,11 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // cond: c >= 64-ntz(m) // result: (Const32 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpRsh32Ux64 { continue } @@ -1978,14 +1877,11 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // cond: c >= 64-nlz(m) // result: (Const32 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpLsh32x64 { continue } @@ -2007,8 +1903,8 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // match: (And32 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -2019,13 +1915,11 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // match: (And32 (Const32 [-1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -2036,9 +1930,7 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // match: (And32 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } @@ 
-2051,19 +1943,19 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // match: (And32 x (And32 x y)) // result: (And32 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAnd32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpAnd32) v.AddArg(x) v.AddArg(y) @@ -2076,21 +1968,21 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (And32 i (And32 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd32 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst32 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst32 && x.Op != OpConst32) { continue } @@ -2108,26 +2000,24 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { // match: (And32 (Const32 [c]) (And32 (Const32 [d]) x)) // result: (And32 (Const32 [int64(int32(c&d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAnd32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAnd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c & d)) @@ -2141,18 +2031,17 @@ func rewriteValuegeneric_OpAnd32_0(v *Value) bool { return false } func rewriteValuegeneric_OpAnd64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (And64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c&d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -2167,14 +2056,11 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // cond: c >= 64-ntz(m) // result: (Const64 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpRsh64Ux64 { continue } @@ -2197,14 +2083,11 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // cond: c >= 64-nlz(m) // result: (Const64 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpLsh64x64 { continue } @@ -2226,8 +2109,8 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // match: (And64 x x) // result: x for { - x := v.Args[1] 
- if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -2238,13 +2121,11 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // match: (And64 (Const64 [-1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -2255,9 +2136,7 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // match: (And64 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } @@ -2270,19 +2149,19 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // match: (And64 x (And64 x y)) // result: (And64 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAnd64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpAnd64) v.AddArg(x) v.AddArg(y) @@ -2295,21 +2174,21 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (And64 i (And64 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd64 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst64 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } @@ -2327,26 +2206,24 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { // match: (And64 (Const64 [c]) (And64 (Const64 [d]) x)) // result: (And64 (Const64 [c&d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAnd64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAnd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c & d @@ -2360,18 +2237,17 @@ func rewriteValuegeneric_OpAnd64_0(v *Value) bool { return false } func rewriteValuegeneric_OpAnd8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (And8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c&d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -2386,14 +2262,11 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // cond: c >= 64-ntz(m) // result: 
(Const8 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpRsh8Ux64 { continue } @@ -2416,14 +2289,11 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // cond: c >= 64-nlz(m) // result: (Const8 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } m := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpLsh8x64 { continue } @@ -2445,8 +2315,8 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // match: (And8 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -2457,13 +2327,11 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // match: (And8 (Const8 [-1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -2474,9 +2342,7 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // match: (And8 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != 0 { continue } @@ -2489,19 +2355,19 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // match: (And8 x (And8 x y)) // result: (And8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpAnd8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpAnd8) v.AddArg(x) v.AddArg(y) @@ -2514,21 +2380,21 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (And8 i (And8 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd8 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst8 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst8 && x.Op != OpConst8) { continue } @@ -2546,26 +2412,24 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { // match: (And8 (Const8 [c]) (And8 (Const8 [d]) x)) // result: (And8 (Const8 [int64(int8(c&d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAnd8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt 
- x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAnd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c & d)) @@ -2579,10 +2443,10 @@ func rewriteValuegeneric_OpAnd8_0(v *Value) bool { return false } func rewriteValuegeneric_OpArraySelect_0(v *Value) bool { + v_0 := v.Args[0] // match: (ArraySelect (ArrayMake1 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpArrayMake1 { break } @@ -2595,11 +2459,7 @@ func rewriteValuegeneric_OpArraySelect_0(v *Value) bool { // match: (ArraySelect [0] (IData x)) // result: (IData x) for { - if v.AuxInt != 0 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpIData { + if v.AuxInt != 0 || v_0.Op != OpIData { break } x := v_0.Args[0] @@ -2610,10 +2470,10 @@ func rewriteValuegeneric_OpArraySelect_0(v *Value) bool { return false } func rewriteValuegeneric_OpCom16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com16 (Com16 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpCom16 { break } @@ -2626,7 +2486,6 @@ func rewriteValuegeneric_OpCom16_0(v *Value) bool { // match: (Com16 (Const16 [c])) // result: (Const16 [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -2638,17 +2497,17 @@ func rewriteValuegeneric_OpCom16_0(v *Value) bool { // match: (Com16 (Add16 (Const16 [-1]) x)) // result: (Neg16 x) for { - v_0 := v.Args[0] if v_0.Op != OpAdd16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst16 || v_0_0.AuxInt != -1 { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpNeg16) v.AddArg(x) return true @@ -2658,10 +2517,10 @@ func rewriteValuegeneric_OpCom16_0(v *Value) bool { return false } func rewriteValuegeneric_OpCom32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com32 (Com32 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpCom32 { break } @@ -2674,7 +2533,6 @@ func rewriteValuegeneric_OpCom32_0(v *Value) bool { // match: (Com32 (Const32 [c])) // result: (Const32 [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -2686,17 +2544,17 @@ func rewriteValuegeneric_OpCom32_0(v *Value) bool { // match: (Com32 (Add32 (Const32 [-1]) x)) // result: (Neg32 x) for { - v_0 := v.Args[0] if v_0.Op != OpAdd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst32 || v_0_0.AuxInt != -1 { continue } - x := v_0.Args[1^_i0] + x := v_0_1 v.reset(OpNeg32) v.AddArg(x) return true @@ -2706,10 +2564,10 @@ func rewriteValuegeneric_OpCom32_0(v *Value) bool { return false } func rewriteValuegeneric_OpCom64_0(v *Value) bool { + v_0 := v.Args[0] // match: (Com64 (Com64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpCom64 { break } @@ -2722,7 +2580,6 @@ func rewriteValuegeneric_OpCom64_0(v *Value) bool { // match: (Com64 (Const64 [c])) // result: (Const64 [^c]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } @@ -2734,17 +2591,17 @@ func rewriteValuegeneric_OpCom64_0(v *Value) bool { // match: (Com64 (Add64 (Const64 [-1]) x)) // result: (Neg64 x) for { - v_0 := v.Args[0] if v_0.Op != OpAdd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst64 || v_0_0.AuxInt != -1 { continue } - x := 
v_0.Args[1^_i0]
+			x := v_0_1
 			v.reset(OpNeg64)
 			v.AddArg(x)
 			return true
@@ -2754,10 +2611,10 @@ func rewriteValuegeneric_OpCom64_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCom8_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Com8 (Com8 x))
 	// result: x
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpCom8 {
 			break
 		}
@@ -2770,7 +2627,6 @@ func rewriteValuegeneric_OpCom8_0(v *Value) bool {
 	// match: (Com8 (Const8 [c]))
 	// result: (Const8 [^c])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst8 {
 			break
 		}
@@ -2782,17 +2638,17 @@ func rewriteValuegeneric_OpCom8_0(v *Value) bool {
 	// match: (Com8 (Add8 (Const8 [-1]) x))
 	// result: (Neg8 x)
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpAdd8 {
 			break
 		}
 		_ = v_0.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0_0 := v_0.Args[_i0]
+		v_0_0 := v_0.Args[0]
+		v_0_1 := v_0.Args[1]
+		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			if v_0_0.Op != OpConst8 || v_0_0.AuxInt != -1 {
 				continue
 			}
-			x := v_0.Args[1^_i0]
+			x := v_0_1
 			v.reset(OpNeg8)
 			v.AddArg(x)
 			return true
@@ -2935,26 +2791,27 @@ func rewriteValuegeneric_OpConstString_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpConvert_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	// match: (Convert (Add64 (Convert ptr mem) off) mem)
 	// result: (Add64 ptr off)
 	for {
-		mem := v.Args[1]
-		v_0 := v.Args[0]
 		if v_0.Op != OpAdd64 {
 			break
 		}
 		_ = v_0.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0_0 := v_0.Args[_i0]
+		v_0_0 := v_0.Args[0]
+		v_0_1 := v_0.Args[1]
+		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			if v_0_0.Op != OpConvert {
 				continue
 			}
-			_ = v_0_0.Args[1]
+			mem := v_0_0.Args[1]
 			ptr := v_0_0.Args[0]
-			if mem != v_0_0.Args[1] {
+			off := v_0_1
+			if mem != v_1 {
 				continue
 			}
-			off := v_0.Args[1^_i0]
 			v.reset(OpAdd64)
 			v.AddArg(ptr)
 			v.AddArg(off)
@@ -2965,23 +2822,22 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool {
 	// match: (Convert (Add32 (Convert ptr mem) off) mem)
 	// result: (Add32 ptr off)
 	for {
-		mem := v.Args[1]
-		v_0 := v.Args[0]
 		if v_0.Op != OpAdd32 {
 			break
 		}
 		_ = v_0.Args[1]
-		for _i0 := 0; _i0 <= 1; _i0++ {
-			v_0_0 := v_0.Args[_i0]
+		v_0_0 := v_0.Args[0]
+		v_0_1 := v_0.Args[1]
+		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			if v_0_0.Op != OpConvert {
 				continue
 			}
-			_ = v_0_0.Args[1]
+			mem := v_0_0.Args[1]
 			ptr := v_0_0.Args[0]
-			if mem != v_0_0.Args[1] {
+			off := v_0_1
+			if mem != v_1 {
 				continue
 			}
-			off := v_0.Args[1^_i0]
 			v.reset(OpAdd32)
 			v.AddArg(ptr)
 			v.AddArg(off)
@@ -2992,14 +2848,12 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool {
 	// match: (Convert (Convert ptr mem) mem)
 	// result: ptr
 	for {
-		mem := v.Args[1]
-		v_0 := v.Args[0]
 		if v_0.Op != OpConvert {
 			break
 		}
-		_ = v_0.Args[1]
+		mem := v_0.Args[1]
 		ptr := v_0.Args[0]
-		if mem != v_0.Args[1] {
+		if mem != v_1 {
 			break
 		}
 		v.reset(OpCopy)
@@ -3010,10 +2864,10 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt32Fto32 (Const32F [c]))
 	// result: (Const32 [int64(int32(auxTo32F(c)))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst32F {
 			break
 		}
@@ -3025,10 +2879,10 @@ func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt32Fto64_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt32Fto64 (Const32F [c]))
 	// result: (Const64 [int64(auxTo32F(c))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst32F {
 			break
 		}
@@ -3040,10 +2894,10 @@ func rewriteValuegeneric_OpCvt32Fto64F_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt32Fto64F (Const32F [c]))
 	// result: (Const64F [c])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst32F {
 			break
 		}
@@ -3055,10 +2909,10 @@ func rewriteValuegeneric_OpCvt32Fto64F_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt32to32F (Const32 [c]))
 	// result: (Const32F [auxFrom32F(float32(int32(c)))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst32 {
 			break
 		}
@@ -3070,10 +2924,10 @@ func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt32to64F (Const32 [c]))
 	// result: (Const64F [auxFrom64F(float64(int32(c)))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst32 {
 			break
 		}
@@ -3085,10 +2939,10 @@ func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt64Fto32 (Const64F [c]))
 	// result: (Const32 [int64(int32(auxTo64F(c)))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst64F {
 			break
 		}
@@ -3100,10 +2954,10 @@ func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt64Fto32F (Const64F [c]))
 	// result: (Const32F [auxFrom32F(float32(auxTo64F(c)))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst64F {
 			break
 		}
@@ -3115,10 +2969,10 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt64Fto64 (Const64F [c]))
 	// result: (Const64 [int64(auxTo64F(c))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst64F {
 			break
 		}
@@ -3130,10 +2984,10 @@ func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt64to32F (Const64 [c]))
 	// result: (Const32F [auxFrom32F(float32(c))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst64 {
 			break
 		}
@@ -3145,10 +2999,10 @@ func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool {
+	v_0 := v.Args[0]
 	// match: (Cvt64to64F (Const64 [c]))
 	// result: (Const64F [auxFrom64F(float64(c))])
 	for {
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst64 {
 			break
 		}
@@ -3160,19 +3014,18 @@ func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool {
 	return false
 }
 func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Div16 (Const16 [c]) (Const16 [d]))
 	// cond: d != 0
 	// result: (Const16 [int64(int16(c)/int16(d))])
 	for {
-		_ = v.Args[1]
-		v_0 := v.Args[0]
 		if v_0.Op != OpConst16 {
 			break
 		}
 		c := v_0.AuxInt
-		v_1 := v.Args[1]
 		if v_1.Op != OpConst16 {
 			break
 		}
@@ -3188,9 +3041,7 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
 	// cond: isNonNegative(n) && isPowerOfTwo(c&0xffff)
 	// result: (Rsh16Ux64 n (Const64 [log2(c&0xffff)]))
 	for {
-		_ = v.Args[1]
-		n := v.Args[0]
-		v_1 := v.Args[1]
+		n := v_0
 		if v_1.Op != OpConst16 {
 			break
 		}
@@ -3210,9 +3061,7 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
 	// result: (Neg16 (Div16 n (Const16 [-c])))
 	for {
 		t := v.Type
-		_ = v.Args[1]
-		n := v.Args[0]
-		v_1 := v.Args[1]
+		n := v_0
 		if v_1.Op != OpConst16 {
 			break
 		}
@@ -3233,9 +3082,7 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
 	// result: (Rsh16Ux64 (And16 x (Neg16 x)) (Const64 [15]))
 	for {
t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 || v_1.AuxInt != -1<<15 { break } @@ -3256,9 +3103,7 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool { // result: (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [16-log2(c)]))) (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst16 { break } @@ -3291,9 +3136,7 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool { // result: (Sub16 (Rsh32x64 (Mul32 (Const32 [int64(smagic(16,c).m)]) (SignExt16to32 x)) (Const64 [16+smagic(16,c).s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -3329,6 +3172,8 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -3336,13 +3181,10 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { // cond: d != 0 // result: (Const16 [int64(int16(uint16(c)/uint16(d)))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -3358,9 +3200,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { // cond: isPowerOfTwo(c&0xffff) // result: (Rsh16Ux64 n (Const64 [log2(c&0xffff)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst16 { break } @@ -3379,9 +3219,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { // cond: umagicOK(16, c) && config.RegSize == 8 // result: (Trunc64to16 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<16+umagic(16,c).m)]) (ZeroExt16to64 x)) (Const64 [16+umagic(16,c).s]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -3409,9 +3247,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { // cond: umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0 // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Const32 [int64(1<<15+umagic(16,c).m/2)]) (ZeroExt16to32 x)) (Const64 [16+umagic(16,c).s-1]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -3439,9 +3275,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { // cond: umagicOK(16, c) && config.RegSize == 4 && c&1 == 0 // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Const32 [int64(1<<15+(umagic(16,c).m+1)/2)]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [16+umagic(16,c).s-2]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -3474,9 +3308,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { // cond: umagicOK(16, c) && config.RegSize == 4 && config.useAvg // result: (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) (Mul32 (Const32 [int64(umagic(16,c).m)]) (ZeroExt16to32 x))) (Const64 [16+umagic(16,c).s-1]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -3513,6 +3345,8 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -3520,13 +3354,10 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // cond: d != 0 // result: (Const32 [int64(int32(c)/int32(d))]) for { - _ = 
v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -3542,9 +3373,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // cond: isNonNegative(n) && isPowerOfTwo(c&0xffffffff) // result: (Rsh32Ux64 n (Const64 [log2(c&0xffffffff)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -3564,9 +3393,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // result: (Neg32 (Div32 n (Const32 [-c]))) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -3587,9 +3414,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // result: (Rsh32Ux64 (And32 x (Neg32 x)) (Const64 [31])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 || v_1.AuxInt != -1<<31 { break } @@ -3610,9 +3435,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // result: (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [32-log2(c)]))) (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -3645,9 +3468,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // result: (Sub32 (Rsh64x64 (Mul64 (Const64 [int64(smagic(32,c).m)]) (SignExt32to64 x)) (Const64 [32+smagic(32,c).s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3685,9 +3506,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // result: (Sub32 (Rsh32x64 (Hmul32 (Const32 [int64(int32(smagic(32,c).m/2))]) x) (Const64 [smagic(32,c).s-1])) (Rsh32x64 x (Const64 [31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3721,9 +3540,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { // result: (Sub32 (Rsh32x64 (Add32 (Hmul32 (Const32 [int64(int32(smagic(32,c).m))]) x) x) (Const64 [smagic(32,c).s])) (Rsh32x64 x (Const64 [31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3758,17 +3575,16 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Div32F (Const32F [c]) (Const32F [d])) // result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32F { break } @@ -3781,9 +3597,7 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool { // cond: reciprocalExact32(auxTo32F(c)) // result: (Mul32F x (Const32F [auxFrom32F(1/auxTo32F(c))])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32F { break } @@ -3802,6 +3616,8 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -3809,13 +3625,10 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: d != 0 // result: (Const32 [int64(int32(uint32(c)/uint32(d)))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -3831,9 +3644,7 @@ func 
rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: isPowerOfTwo(c&0xffffffff) // result: (Rsh32Ux64 n (Const64 [log2(c&0xffffffff)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -3852,9 +3663,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 && config.useHmul // result: (Rsh32Ux64 (Hmul32u (Const32 [int64(int32(1<<31+umagic(32,c).m/2))]) x) (Const64 [umagic(32,c).s-1])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3879,9 +3688,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 && config.useHmul // result: (Rsh32Ux64 (Hmul32u (Const32 [int64(int32(1<<31+(umagic(32,c).m+1)/2))]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [umagic(32,c).s-2])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3911,9 +3718,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: umagicOK(32, c) && config.RegSize == 4 && config.useAvg && config.useHmul // result: (Rsh32Ux64 (Avg32u x (Hmul32u (Const32 [int64(int32(umagic(32,c).m))]) x)) (Const64 [umagic(32,c).s-1])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3941,9 +3746,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0 // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<31+umagic(32,c).m/2)]) (ZeroExt32to64 x)) (Const64 [32+umagic(32,c).s-1]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -3971,9 +3774,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: umagicOK(32, c) && config.RegSize == 8 && c&1 == 0 // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<31+(umagic(32,c).m+1)/2)]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [32+umagic(32,c).s-2]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -4006,9 +3807,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { // cond: umagicOK(32, c) && config.RegSize == 8 && config.useAvg // result: (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) (Mul64 (Const64 [int64(umagic(32,c).m)]) (ZeroExt32to64 x))) (Const64 [32+umagic(32,c).s-1]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -4045,6 +3844,8 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -4052,13 +3853,10 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // cond: d != 0 // result: (Const64 [c/d]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -4074,9 +3872,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // cond: isNonNegative(n) && isPowerOfTwo(c) // result: (Rsh64Ux64 n (Const64 [log2(c)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 { break } @@ -4095,9 +3891,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // cond: isNonNegative(n) // result: (Const64 [0]) for { - _ = v.Args[1] - n := v.Args[0] - v_1 
:= v.Args[1] + n := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 || !(isNonNegative(n)) { break } @@ -4110,9 +3904,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // result: (Neg64 (Div64 n (Const64 [-c]))) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 { break } @@ -4133,9 +3925,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // result: (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 { break } @@ -4156,9 +3946,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // result: (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [64-log2(c)]))) (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 { break } @@ -4191,9 +3979,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // result: (Sub64 (Rsh64x64 (Hmul64 (Const64 [int64(smagic(64,c).m/2)]) x) (Const64 [smagic(64,c).s-1])) (Rsh64x64 x (Const64 [63]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -4227,9 +4013,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { // result: (Sub64 (Rsh64x64 (Add64 (Hmul64 (Const64 [int64(smagic(64,c).m)]) x) x) (Const64 [smagic(64,c).s])) (Rsh64x64 x (Const64 [63]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -4264,17 +4048,16 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Div64F (Const64F [c]) (Const64F [d])) // result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64F { break } @@ -4287,9 +4070,7 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool { // cond: reciprocalExact64(auxTo64F(c)) // result: (Mul64F x (Const64F [auxFrom64F(1/auxTo64F(c))])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64F { break } @@ -4308,6 +4089,8 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -4315,13 +4098,10 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { // cond: d != 0 // result: (Const64 [int64(uint64(c)/uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -4337,9 +4117,7 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { // cond: isPowerOfTwo(c) // result: (Rsh64Ux64 n (Const64 [log2(c)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 { break } @@ -4357,9 +4135,7 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { // match: (Div64u n (Const64 [-1<<63])) // result: (Rsh64Ux64 n (Const64 [63])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 { break } @@ -4374,9 +4150,7 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { // cond: umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 && config.useHmul // result: (Rsh64Ux64 (Hmul64u (Const64 
[int64(1<<63+umagic(64,c).m/2)]) x) (Const64 [umagic(64,c).s-1])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -4401,9 +4175,7 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { // cond: umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 && config.useHmul // result: (Rsh64Ux64 (Hmul64u (Const64 [int64(1<<63+(umagic(64,c).m+1)/2)]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [umagic(64,c).s-2])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -4433,9 +4205,7 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { // cond: umagicOK(64, c) && config.RegSize == 8 && config.useAvg && config.useHmul // result: (Rsh64Ux64 (Avg64u x (Hmul64u (Const64 [int64(umagic(64,c).m)]) x)) (Const64 [umagic(64,c).s-1])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -4462,19 +4232,18 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8 (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(int8(c)/int8(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -4490,9 +4259,7 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { // cond: isNonNegative(n) && isPowerOfTwo(c&0xff) // result: (Rsh8Ux64 n (Const64 [log2(c&0xff)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -4512,9 +4279,7 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { // result: (Neg8 (Div8 n (Const8 [-c]))) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -4535,9 +4300,7 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { // result: (Rsh8Ux64 (And8 x (Neg8 x)) (Const64 [7 ])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 || v_1.AuxInt != -1<<7 { break } @@ -4558,9 +4321,7 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { // result: (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [ 8-log2(c)]))) (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -4593,9 +4354,7 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { // result: (Sub8 (Rsh32x64 (Mul32 (Const32 [int64(smagic(8,c).m)]) (SignExt8to32 x)) (Const64 [8+smagic(8,c).s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -4631,19 +4390,18 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { return false } func rewriteValuegeneric_OpDiv8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Div8u (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(int8(uint8(c)/uint8(d)))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -4659,9 +4417,7 @@ func rewriteValuegeneric_OpDiv8u_0(v *Value) bool { // cond: isPowerOfTwo(c&0xff) // result: (Rsh8Ux64 n (Const64 [log2(c&0xff)])) for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -4680,9 +4436,7 @@ func 
rewriteValuegeneric_OpDiv8u_0(v *Value) bool { // cond: umagicOK(8, c) // result: (Trunc32to8 (Rsh32Ux64 (Mul32 (Const32 [int64(1<<8+umagic(8,c).m)]) (ZeroExt8to32 x)) (Const64 [8+umagic(8,c).s]))) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -4709,14 +4463,16 @@ func rewriteValuegeneric_OpDiv8u_0(v *Value) bool { return false } func rewriteValuegeneric_OpEq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types // match: (Eq16 x x) // result: (ConstBool [1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -4726,26 +4482,24 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // match: (Eq16 (Const16 [c]) (Add16 (Const16 [d]) x)) // result: (Eq16 (Const16 [int64(int16(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpEq16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) @@ -4759,14 +4513,11 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // match: (Eq16 (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -4781,9 +4532,7 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: x.Op != OpConst16 && udivisibleOK(16,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32u (ZeroExt16to32 x) (Const32 [c&0xffff])) (Const32 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMod16u { continue } @@ -4794,7 +4543,6 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } c := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && udivisibleOK(16, c) && !hasSmallRotate(config)) { continue } @@ -4818,9 +4566,7 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: x.Op != OpConst16 && sdivisibleOK(16,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32 (SignExt16to32 x) (Const32 [c])) (Const32 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMod16 { continue } @@ -4831,7 +4577,6 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } c := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && sdivisibleOK(16, c) && !hasSmallRotate(config)) { continue } @@ -4855,21 +4600,19 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c) // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) 
(Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc64to16 { continue } @@ -4883,13 +4626,13 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { continue } @@ -4926,21 +4669,19 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc32to16 { continue } @@ -4954,13 +4695,13 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { continue } @@ -4997,21 +4738,19 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c) // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc32to16 { continue } @@ -5025,13 +4764,13 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for 
_i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpRsh32Ux64 { continue } @@ -5077,21 +4816,19 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c) // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int64(int16(udivisible(16,c).m))]) x) (Const16 [int64(16-udivisible(16,c).k)]) ) (Const16 [int64(int16(udivisible(16,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc32to16 { continue } @@ -5123,13 +4860,13 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { continue } @@ -5166,21 +4903,19 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c) // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int64(int16(sdivisible(16,c).m))]) x) (Const16 [int64(int16(sdivisible(16,c).a))]) ) (Const16 [int64(16-sdivisible(16,c).k)]) ) (Const16 [int64(int16(sdivisible(16,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub16 { continue } @@ -5195,13 +4930,13 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { continue } @@ -5252,16 +4987,16 @@ func rewriteValuegeneric_OpEq16_0(v *Value) bool { return false } func rewriteValuegeneric_OpEq16_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) // cond: k > 0 && k < 15 && kbar == 16 - k // result: (Eq16 (And16 n (Const16 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n 
:= v_0 if v_1.Op != OpLsh16x64 { continue } @@ -5277,12 +5012,10 @@ func rewriteValuegeneric_OpEq16_10(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -5331,15 +5064,13 @@ func rewriteValuegeneric_OpEq16_10(v *Value) bool { // cond: s.Uses == 1 // result: (Eq16 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub16 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -5354,22 +5085,20 @@ func rewriteValuegeneric_OpEq16_10(v *Value) bool { // cond: isPowerOfTwo(y) // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd16 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst16 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -5391,13 +5120,15 @@ func rewriteValuegeneric_OpEq16_10(v *Value) bool { return false } func rewriteValuegeneric_OpEq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x x) // result: (ConstBool [1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -5407,26 +5138,24 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // match: (Eq32 (Const32 [c]) (Add32 (Const32 [d]) x)) // result: (Eq32 (Const32 [int64(int32(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpEq32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c - d)) @@ -5440,14 +5169,11 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // match: (Eq32 (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -5462,21 +5188,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s 
== umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpRsh32Ux64 { continue } @@ -5486,13 +5210,14 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i2] { + if x != mul_1 { continue } v_1_1_1 := v_1_1.Args[1] @@ -5528,21 +5253,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpRsh32Ux64 { continue } @@ -5552,13 +5275,13 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpRsh32Ux64 { continue } @@ -5603,21 +5326,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpRsh32Ux64 { continue } @@ -5635,13 +5356,14 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { 
continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i2] { + if x != mul_1 { continue } v_1_1_1 := v_1_1.Args[1] @@ -5677,21 +5399,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc64to32 { continue } @@ -5705,13 +5425,13 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { continue } @@ -5748,21 +5468,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc64to32 { continue } @@ -5776,13 +5494,13 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpRsh64Ux64 { continue } @@ -5828,21 +5546,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int64(int32(udivisible(32,c).m))]) x) (Const32 [int64(32-udivisible(32,c).k)]) ) (Const32 [int64(int32(udivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] 
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc64to32 { continue } @@ -5874,13 +5590,13 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { continue } @@ -5917,21 +5633,19 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub32 { continue } @@ -5946,13 +5660,13 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { continue } @@ -6003,27 +5717,27 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { return false } func rewriteValuegeneric_OpEq32_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub32 { continue } @@ -6038,13 +5752,14 @@ func 
rewriteValuegeneric_OpEq32_10(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i2] { + if x != mul_1 { continue } v_1_1_0_1 := v_1_1_0.Args[1] @@ -6094,21 +5809,19 @@ func rewriteValuegeneric_OpEq32_10(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c) // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int64(int32(sdivisible(32,c).m))]) x) (Const32 [int64(int32(sdivisible(32,c).a))]) ) (Const32 [int64(32-sdivisible(32,c).k)]) ) (Const32 [int64(int32(sdivisible(32,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub32 { continue } @@ -6123,19 +5836,22 @@ func rewriteValuegeneric_OpEq32_10(v *Value) bool { continue } _ = v_1_1_0_0.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul := v_1_1_0_0.Args[_i2] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + v_1_1_0_0_1 := v_1_1_0_0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 { + mul := v_1_1_0_0_0 if mul.Op != OpHmul32 { continue } _ = mul.Args[1] - for _i3 := 0; _i3 <= 1; _i3++ { - mul_0 := mul.Args[_i3] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i3] || x != v_1_1_0_0.Args[1^_i2] { + if x != mul_1 || x != v_1_1_0_0_1 { continue } v_1_1_0_1 := v_1_1_0.Args[1] @@ -6186,10 +5902,8 @@ func rewriteValuegeneric_OpEq32_10(v *Value) bool { // cond: k > 0 && k < 31 && kbar == 32 - k // result: (Eq32 (And32 n (Const32 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh32x64 { continue } @@ -6205,12 +5919,10 @@ func rewriteValuegeneric_OpEq32_10(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -6259,15 +5971,13 @@ func rewriteValuegeneric_OpEq32_10(v *Value) bool { // cond: s.Uses == 1 // result: (Eq32 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub32 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -6282,22 +5992,20 @@ func rewriteValuegeneric_OpEq32_10(v *Value) 
bool { // cond: isPowerOfTwo(y) // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd32 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst32 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -6319,17 +6027,16 @@ func rewriteValuegeneric_OpEq32_10(v *Value) bool { return false } func rewriteValuegeneric_OpEq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq32F (Const32F [c]) (Const32F [d])) // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32F { continue } @@ -6343,13 +6050,15 @@ func rewriteValuegeneric_OpEq32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpEq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Eq64 x x) // result: (ConstBool [1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -6359,26 +6068,24 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // match: (Eq64 (Const64 [c]) (Add64 (Const64 [d]) x)) // result: (Eq64 (Const64 [c-d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpEq64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - d @@ -6392,14 +6099,11 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // match: (Eq64 (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -6414,21 +6118,19 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 
1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpRsh64Ux64 { continue } @@ -6438,13 +6140,14 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i2] { + if x != mul_1 { continue } v_1_1_1 := v_1_1.Args[1] @@ -6480,21 +6183,19 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpRsh64Ux64 { continue } @@ -6504,13 +6205,13 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpRsh64Ux64 { continue } @@ -6555,21 +6256,19 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible(64,c).m)]) x) (Const64 [int64(64-udivisible(64,c).k)]) ) (Const64 [int64(udivisible(64,c).max)]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpRsh64Ux64 { continue } @@ -6587,13 +6286,14 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i2] { + if x != mul_1 { continue } v_1_1_1 := v_1_1.Args[1] @@ -6629,21 +6329,19 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) 
(Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub64 { continue } @@ -6658,13 +6356,14 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i2] { + if x != mul_1 { continue } v_1_1_0_1 := v_1_1_0.Args[1] @@ -6714,21 +6413,19 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c) // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible(64,c).m)]) x) (Const64 [int64(sdivisible(64,c).a)]) ) (Const64 [int64(64-sdivisible(64,c).k)]) ) (Const64 [int64(sdivisible(64,c).max)]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub64 { continue } @@ -6743,19 +6440,22 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { continue } _ = v_1_1_0_0.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul := v_1_1_0_0.Args[_i2] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + v_1_1_0_0_1 := v_1_1_0_0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 { + mul := v_1_1_0_0_0 if mul.Op != OpHmul64 { continue } _ = mul.Args[1] - for _i3 := 0; _i3 <= 1; _i3++ { - mul_0 := mul.Args[_i3] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 { if mul_0.Op != OpConst64 { continue } m := mul_0.AuxInt - if x != mul.Args[1^_i3] || x != v_1_1_0_0.Args[1^_i2] { + if x != mul_1 || x != v_1_1_0_0_1 { continue } v_1_1_0_1 := v_1_1_0.Args[1] @@ -6806,10 +6506,8 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: k > 0 && k < 63 && kbar == 64 - k // result: (Eq64 (And64 n (Const64 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh64x64 { continue } @@ -6825,12 +6523,10 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = 
_i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -6879,15 +6575,13 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { // cond: s.Uses == 1 // result: (Eq64 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub64 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -6901,27 +6595,27 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { return false } func rewriteValuegeneric_OpEq64_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Eq64 (And64 x (Const64 [y])) (Const64 [y])) // cond: isPowerOfTwo(y) // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd64 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst64 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -6943,17 +6637,16 @@ func rewriteValuegeneric_OpEq64_10(v *Value) bool { return false } func rewriteValuegeneric_OpEq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Eq64F (Const64F [c]) (Const64F [d])) // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64F { continue } @@ -6967,14 +6660,16 @@ func rewriteValuegeneric_OpEq64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpEq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types // match: (Eq8 x x) // result: (ConstBool [1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -6984,26 +6679,24 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // match: (Eq8 (Const8 [c]) (Add8 (Const8 [d]) x)) // result: (Eq8 (Const8 [int64(int8(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpEq8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c - d)) @@ -7017,14 +6710,11 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // match: (Eq8 (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { 
continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -7039,9 +6729,7 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: x.Op != OpConst8 && udivisibleOK(8,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32u (ZeroExt8to32 x) (Const32 [c&0xff])) (Const32 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMod8u { continue } @@ -7052,7 +6740,6 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { continue } c := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && udivisibleOK(8, c) && !hasSmallRotate(config)) { continue } @@ -7076,9 +6763,7 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: x.Op != OpConst8 && sdivisibleOK(8,c) && !hasSmallRotate(config) // result: (Eq32 (Mod32 (SignExt8to32 x) (Const32 [c])) (Const32 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpMod8 { continue } @@ -7089,7 +6774,6 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { continue } c := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && sdivisibleOK(8, c) && !hasSmallRotate(config)) { continue } @@ -7113,21 +6797,19 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c) // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int64(int8(udivisible(8,c).m))]) x) (Const8 [int64(8-udivisible(8,c).k)]) ) (Const8 [int64(int8(udivisible(8,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpTrunc32to8 { continue } @@ -7141,13 +6823,13 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { continue } @@ -7184,21 +6866,19 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c) // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int64(int8(sdivisible(8,c).m))]) x) (Const8 [int64(int8(sdivisible(8,c).a))]) ) (Const8 [int64(8-sdivisible(8,c).k)]) ) (Const8 [int64(int8(sdivisible(8,c).max))]) ) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpMul8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != 
OpConst8 { continue } c := v_1_0.AuxInt - v_1_1 := v_1.Args[1^_i1] if v_1_1.Op != OpSub8 { continue } @@ -7213,13 +6893,13 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { continue } _ = mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2++ { - mul_0 := mul.Args[_i2] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { if mul_0.Op != OpConst32 { continue } m := mul_0.AuxInt - mul_1 := mul.Args[1^_i2] if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { continue } @@ -7271,10 +6951,8 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: k > 0 && k < 7 && kbar == 8 - k // result: (Eq8 (And8 n (Const8 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh8x64 { continue } @@ -7290,12 +6968,10 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -7344,15 +7020,13 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: s.Uses == 1 // result: (Eq8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub8 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -7367,22 +7041,20 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { // cond: isPowerOfTwo(y) // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd8 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst8 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -7404,17 +7076,16 @@ func rewriteValuegeneric_OpEq8_0(v *Value) bool { return false } func rewriteValuegeneric_OpEqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EqB (ConstBool [c]) (ConstBool [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstBool { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConstBool { continue } @@ -7428,13 +7099,11 @@ func rewriteValuegeneric_OpEqB_0(v *Value) bool { // match: (EqB (ConstBool [0]) x) // result: (Not x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstBool || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpNot) v.AddArg(x) return true @@ -7444,13 +7113,11 @@ func 
rewriteValuegeneric_OpEqB_0(v *Value) bool { // match: (EqB (ConstBool [1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstBool || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -7461,13 +7128,15 @@ func rewriteValuegeneric_OpEqB_0(v *Value) bool { return false } func rewriteValuegeneric_OpEqInter_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqInter x y) // result: (EqPtr (ITab x) (ITab y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpEqPtr) v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr) v0.AddArg(x) @@ -7479,11 +7148,13 @@ func rewriteValuegeneric_OpEqInter_0(v *Value) bool { } } func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (EqPtr x x) // result: (ConstBool [1]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -7493,14 +7164,11 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (Addr {a} _) (Addr {b} _)) // result: (ConstBool [b2i(a == b)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } a := v_0.Aux - v_1 := v.Args[1^_i0] if v_1.Op != OpAddr { continue } @@ -7514,14 +7182,11 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) // result: (ConstBool [b2i(a == b && o == 0)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } a := v_0.Aux - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7540,9 +7205,7 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) // result: (ConstBool [b2i(a == b && o1 == o2)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -7552,7 +7215,6 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { continue } a := v_0_0.Aux - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7571,15 +7233,12 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) // result: (ConstBool [b2i(a == b)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } a := v_0.Aux _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpLocalAddr { continue } @@ -7594,15 +7253,12 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a == b && o == 0)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } a := v_0.Aux _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7622,9 +7278,7 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a == b && o1 == o2)]) for { 
- _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -7635,7 +7289,6 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { } a := v_0_0.Aux _ = v_0_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7656,15 +7309,13 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 == 0)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } o1 := v_0.AuxInt p1 := v_0.Args[0] - p2 := v.Args[1^_i0] + p2 := v_1 if !(isSamePtr(p1, p2)) { continue } @@ -7678,15 +7329,12 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 == o2)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } o1 := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7704,14 +7352,11 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { // match: (EqPtr (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -7725,19 +7370,18 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqPtr (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c == d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -7751,14 +7395,11 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (LocalAddr _ _) (Addr _)) // result: (ConstBool [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpAddr { continue } @@ -7771,9 +7412,7 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) // result: (ConstBool [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -7782,7 +7421,6 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { continue } _ = v_0_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpAddr { continue } @@ -7795,14 +7433,11 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (LocalAddr _ _) (OffPtr (Addr _))) // result: (ConstBool [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7819,9 +7454,7 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) // result: 
(ConstBool [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -7830,7 +7463,6 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { continue } _ = v_0_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -7848,15 +7480,13 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // cond: isSamePtr(p1, p2) // result: (Not (IsNonNil o1)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddPtr { continue } o1 := v_0.Args[1] p1 := v_0.Args[0] - p2 := v.Args[1^_i0] + p2 := v_1 if !(isSamePtr(p1, p2)) { continue } @@ -7871,13 +7501,11 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (Const32 [0]) p) // result: (Not (IsNonNil p)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } - p := v.Args[1^_i0] + p := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) v0.AddArg(p) @@ -7889,13 +7517,11 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (Const64 [0]) p) // result: (Not (IsNonNil p)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } - p := v.Args[1^_i0] + p := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) v0.AddArg(p) @@ -7907,13 +7533,11 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { // match: (EqPtr (ConstNil) p) // result: (Not (IsNonNil p)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstNil { continue } - p := v.Args[1^_i0] + p := v_1 v.reset(OpNot) v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) v0.AddArg(p) @@ -7925,13 +7549,15 @@ func rewriteValuegeneric_OpEqPtr_10(v *Value) bool { return false } func rewriteValuegeneric_OpEqSlice_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (EqSlice x y) // result: (EqPtr (SlicePtr x) (SlicePtr y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpEqPtr) v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr) v0.AddArg(x) @@ -7943,16 +7569,15 @@ func rewriteValuegeneric_OpEqSlice_0(v *Value) bool { } } func rewriteValuegeneric_OpGeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq16 (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(c >= d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -7965,19 +7590,17 @@ func rewriteValuegeneric_OpGeq16_0(v *Value) bool { // cond: int16(c) >= 0 // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_1.Op != OpConst16 { continue } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(int16(c) >= 0) { continue } @@ -7990,16 +7613,15 @@ func rewriteValuegeneric_OpGeq16_0(v *Value) bool { return false } func 
rewriteValuegeneric_OpGeq16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq16U (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(uint16(c) >= uint16(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -8011,16 +7633,15 @@ func rewriteValuegeneric_OpGeq16U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq32 (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c >= d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -8033,19 +7654,17 @@ func rewriteValuegeneric_OpGeq32_0(v *Value) bool { // cond: int32(c) >= 0 // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_1.Op != OpConst32 { continue } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 || v_1.AuxInt != 0 || !(int32(c) >= 0) { continue } @@ -8058,16 +7677,15 @@ func rewriteValuegeneric_OpGeq32_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq32F (Const32F [c]) (Const32F [d])) // result: (ConstBool [b2i(auxTo32F(c) >= auxTo32F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32F { break } @@ -8079,16 +7697,15 @@ func rewriteValuegeneric_OpGeq32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq32U (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(uint32(c) >= uint32(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -8100,16 +7717,15 @@ func rewriteValuegeneric_OpGeq32U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64 (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c >= d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -8122,19 +7738,17 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { // cond: int64(c) >= 0 // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_1.Op != OpConst64 { continue } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(int64(c) >= 0) { continue } @@ -8148,8 +7762,6 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { // cond: c > 0 // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh64Ux64 { break } @@ -8159,7 +7771,6 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(c > 0) { break } @@ -8170,16 +7781,15 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { return false } func 
rewriteValuegeneric_OpGeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64F (Const64F [c]) (Const64F [d])) // result: (ConstBool [b2i(auxTo64F(c) >= auxTo64F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64F { break } @@ -8191,16 +7801,15 @@ func rewriteValuegeneric_OpGeq64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq64U (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(uint64(c) >= uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -8212,16 +7821,15 @@ func rewriteValuegeneric_OpGeq64U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq8 (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(c >= d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -8234,19 +7842,17 @@ func rewriteValuegeneric_OpGeq8_0(v *Value) bool { // cond: int8(c) >= 0 // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd8 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_1 := v_0.Args[1^_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_1.Op != OpConst8 { continue } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(int8(c) >= 0) { continue } @@ -8259,16 +7865,15 @@ func rewriteValuegeneric_OpGeq8_0(v *Value) bool { return false } func rewriteValuegeneric_OpGeq8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Geq8U (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(uint8(c) >= uint8(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -8280,16 +7885,15 @@ func rewriteValuegeneric_OpGeq8U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater16 (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(c > d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -8301,16 +7905,15 @@ func rewriteValuegeneric_OpGreater16_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater16U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater16U (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(uint16(c) > uint16(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -8322,16 +7925,15 @@ func rewriteValuegeneric_OpGreater16U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32 (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c > d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -8343,16 +7945,15 @@ func rewriteValuegeneric_OpGreater32_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] // match: (Greater32F (Const32F [c]) (Const32F [d])) // result: (ConstBool [b2i(auxTo32F(c) > auxTo32F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32F { break } @@ -8364,16 +7965,15 @@ func rewriteValuegeneric_OpGreater32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater32U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater32U (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(uint32(c) > uint32(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -8385,16 +7985,15 @@ func rewriteValuegeneric_OpGreater32U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64 (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c > d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -8406,16 +8005,15 @@ func rewriteValuegeneric_OpGreater64_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64F (Const64F [c]) (Const64F [d])) // result: (ConstBool [b2i(auxTo64F(c) > auxTo64F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64F { break } @@ -8427,16 +8025,15 @@ func rewriteValuegeneric_OpGreater64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater64U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater64U (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(uint64(c) > uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -8448,16 +8045,15 @@ func rewriteValuegeneric_OpGreater64U_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater8 (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(c > d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -8469,16 +8065,15 @@ func rewriteValuegeneric_OpGreater8_0(v *Value) bool { return false } func rewriteValuegeneric_OpGreater8U_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Greater8U (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(uint8(c) > uint8(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -8490,12 +8085,12 @@ func rewriteValuegeneric_OpGreater8U_0(v *Value) bool { return false } func rewriteValuegeneric_OpIMake_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IMake typ (StructMake1 val)) // result: (IMake typ val) for { - _ = v.Args[1] - typ := v.Args[0] - v_1 := v.Args[1] + typ := v_0 if v_1.Op != OpStructMake1 { break } @@ -8508,9 +8103,7 @@ func rewriteValuegeneric_OpIMake_0(v *Value) bool { // match: (IMake typ (ArrayMake1 val)) // result: (IMake typ val) for { - _ = v.Args[1] - typ := v.Args[0] - v_1 := v.Args[1] + typ := v_0 if v_1.Op != OpArrayMake1 { break } @@ -8523,13 +8116,13 @@ func rewriteValuegeneric_OpIMake_0(v *Value) bool { return false } func 
rewriteValuegeneric_OpInterCall_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (InterCall [argsize] (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) // cond: devirt(v, itab, off) != nil // result: (StaticCall [argsize] {devirt(v, itab, off)} mem) for { argsize := v.AuxInt - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLoad { break } @@ -8554,7 +8147,11 @@ func rewriteValuegeneric_OpInterCall_0(v *Value) bool { } itab := v_0_0_0_0_0.Aux v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpSB || !(devirt(v, itab, off) != nil) { + if v_0_0_0_0_0_0.Op != OpSB { + break + } + mem := v_1 + if !(devirt(v, itab, off) != nil) { break } v.reset(OpStaticCall) @@ -8566,17 +8163,13 @@ func rewriteValuegeneric_OpInterCall_0(v *Value) bool { return false } func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c])) // cond: (1 << 8) <= c // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to32 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_0.Op != OpZeroExt8to32 || v_1.Op != OpConst32 { break } c := v_1.AuxInt @@ -8591,13 +8184,7 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: (1 << 8) <= c // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to64 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.Op != OpZeroExt8to64 || v_1.Op != OpConst64 { break } c := v_1.AuxInt @@ -8612,13 +8199,7 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: (1 << 16) <= c // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to32 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_0.Op != OpZeroExt16to32 || v_1.Op != OpConst32 { break } c := v_1.AuxInt @@ -8633,13 +8214,7 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: (1 << 16) <= c // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to64 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.Op != OpZeroExt16to64 || v_1.Op != OpConst64 { break } c := v_1.AuxInt @@ -8653,8 +8228,8 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // match: (IsInBounds x x) // result: (ConstBool [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -8665,19 +8240,17 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd8 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst8 { continue } c := v_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { continue } @@ -8695,8 +8268,6 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to16 { break } @@ -8705,13 +8276,13 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { break } _ = v_0_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0_0 := v_0_0.Args[_i0] + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { if v_0_0_0.Op != OpConst8 { continue } c := v_0_0_0.AuxInt - v_1 := 
v.Args[1] if v_1.Op != OpConst16 { continue } @@ -8729,8 +8300,6 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to32 { break } @@ -8739,13 +8308,13 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { break } _ = v_0_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0_0 := v_0_0.Args[_i0] + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { if v_0_0_0.Op != OpConst8 { continue } c := v_0_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { continue } @@ -8763,8 +8332,6 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to64 { break } @@ -8773,13 +8340,13 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { break } _ = v_0_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0_0 := v_0_0.Args[_i0] + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { if v_0_0_0.Op != OpConst8 { continue } c := v_0_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { continue } @@ -8797,19 +8364,17 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst16 { continue } c := v_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { continue } @@ -8826,12 +8391,12 @@ func rewriteValuegeneric_OpIsInBounds_0(v *Value) bool { return false } func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpZeroExt16to32 { break } @@ -8840,13 +8405,13 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0_0 := v_0_0.Args[_i0] + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { if v_0_0_0.Op != OpConst16 { continue } c := v_0_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { continue } @@ -8864,8 +8429,6 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpZeroExt16to64 { break } @@ -8874,13 +8437,13 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0_0 := v_0_0.Args[_i0] + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { if v_0_0_0.Op != OpConst16 { continue } c := v_0_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { continue } @@ -8898,19 +8461,17 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + 
v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst32 { continue } c := v_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { continue } @@ -8928,8 +8489,6 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpZeroExt32to64 { break } @@ -8938,13 +8497,13 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { break } _ = v_0_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0_0 := v_0_0.Args[_i0] + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { if v_0_0_0.Op != OpConst32 { continue } c := v_0_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { continue } @@ -8962,19 +8521,17 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // cond: 0 <= c && c < d // result: (ConstBool [1]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst64 { continue } c := v_0_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { continue } @@ -8991,13 +8548,10 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // match: (IsInBounds (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(0 <= c && c < d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -9009,13 +8563,10 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // match: (IsInBounds (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(0 <= c && c < d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -9027,13 +8578,11 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // match: (IsInBounds (Mod32u _ y) y) // result: (ConstBool [1]) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMod32u { break } - _ = v_0.Args[1] - if y != v_0.Args[1] { + y := v_0.Args[1] + if y != v_1 { break } v.reset(OpConstBool) @@ -9043,13 +8592,11 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // match: (IsInBounds (Mod64u _ y) y) // result: (ConstBool [1]) for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMod64u { break } - _ = v_0.Args[1] - if y != v_0.Args[1] { + y := v_0.Args[1] + if y != v_1 { break } v.reset(OpConstBool) @@ -9060,8 +8607,6 @@ func rewriteValuegeneric_OpIsInBounds_10(v *Value) bool { // cond: 0 < c && c < 8 && 1< p1 (Store {t2} p2 x _)) @@ -9995,9 +9477,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: x for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10018,9 +9498,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: x for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10048,9 +9526,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: x for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10085,9 +9561,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: x for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != 
OpStore { break } @@ -10129,9 +9603,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: (Const64F [x]) for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10155,9 +9627,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10181,9 +9651,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: (Const64 [x]) for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10207,9 +9675,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))]) for { t1 := v.Type - _ = v.Args[1] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpStore { break } @@ -10233,14 +9699,12 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: @mem.Block (Load (OffPtr [o1] p3) mem) for { t1 := v.Type - _ = v.Args[1] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - v_1 := v.Args[1] if v_1.Op != OpStore { break } @@ -10273,14 +9737,12 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { // result: @mem.Block (Load (OffPtr [o1] p4) mem) for { t1 := v.Type - _ = v.Args[1] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - v_1 := v.Args[1] if v_1.Op != OpStore { break } @@ -10318,6 +9780,8 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { return false } func rewriteValuegeneric_OpLoad_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block fe := b.Func.fe // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _))))) @@ -10325,14 +9789,12 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: @mem.Block (Load (OffPtr [o1] p5) mem) for { t1 := v.Type - _ = v.Args[1] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - v_1 := v.Args[1] if v_1.Op != OpStore { break } @@ -10379,14 +9841,12 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: @mem.Block (Load (OffPtr [o1] p6) mem) for { t1 := v.Type - _ = v.Args[1] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - v_1 := v.Args[1] if v_1.Op != OpStore { break } @@ -10440,14 +9900,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (ConstBool [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpZero { break } @@ -10466,14 +9923,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (Const8 [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpZero { break } @@ -10492,14 +9946,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (Const16 [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpZero { break } @@ -10518,14 +9969,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (Const32 [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := 
v.Args[1] if v_1.Op != OpZero { break } @@ -10544,14 +9992,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (Const64 [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpZero { break } @@ -10570,14 +10015,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (Const32F [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpZero { break } @@ -10596,14 +10038,11 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (Const64F [0]) for { t1 := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpZero { break } @@ -10622,7 +10061,6 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { // result: (StructMake0) for { t := v.Type - _ = v.Args[1] if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { break } @@ -10632,6 +10070,8 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { return false } func rewriteValuegeneric_OpLoad_20(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block fe := b.Func.fe // match: (Load ptr mem) @@ -10639,8 +10079,8 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { // result: (StructMake1 (Load (OffPtr [0] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { break } @@ -10659,8 +10099,8 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { // result: (StructMake2 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) { break } @@ -10686,8 +10126,8 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { // result: (StructMake3 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) { break } @@ -10720,8 +10160,8 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { // result: (StructMake4 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem) (Load (OffPtr [t.FieldOff(3)] ptr) mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) { break } @@ -10761,7 +10201,6 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { // result: (ArrayMake0) for { t := v.Type - _ = v.Args[1] if !(t.IsArray() && t.NumElem() == 0) { break } @@ -10773,8 +10212,8 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { // result: (ArrayMake1 (Load ptr mem)) for { t := v.Type - mem := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + mem := v_1 if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) { break } @@ -10788,14 +10227,14 @@ func rewriteValuegeneric_OpLoad_20(v *Value) bool { return false } func rewriteValuegeneric_OpLsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x16 x (Const16 [c])) // result: (Lsh16x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -10810,8 +10249,6 @@ func 
rewriteValuegeneric_OpLsh16x16_0(v *Value) bool { // match: (Lsh16x16 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -10822,14 +10259,14 @@ func rewriteValuegeneric_OpLsh16x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x32 x (Const32 [c])) // result: (Lsh16x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -10844,8 +10281,6 @@ func rewriteValuegeneric_OpLsh16x32_0(v *Value) bool { // match: (Lsh16x32 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -10856,18 +10291,17 @@ func rewriteValuegeneric_OpLsh16x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x64 (Const16 [c]) (Const64 [d])) // result: (Const16 [int64(int16(c) << uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -10879,9 +10313,7 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { // match: (Lsh16x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -10893,8 +10325,6 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { // match: (Lsh16x64 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -10906,8 +10336,6 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (Const16 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -10924,8 +10352,6 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { // result: (Lsh16x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh16x64 { break } @@ -10936,7 +10362,6 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -10955,8 +10380,6 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Lsh16x64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh16Ux64 { break } @@ -10977,7 +10400,6 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -10995,14 +10417,14 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh16x8 x (Const8 [c])) // result: (Lsh16x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -11017,8 +10439,6 @@ func rewriteValuegeneric_OpLsh16x8_0(v *Value) bool { // match: (Lsh16x8 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -11029,14 +10449,14 @@ func rewriteValuegeneric_OpLsh16x8_0(v *Value) bool { return false } func 
rewriteValuegeneric_OpLsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x16 x (Const16 [c])) // result: (Lsh32x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -11051,8 +10471,6 @@ func rewriteValuegeneric_OpLsh32x16_0(v *Value) bool { // match: (Lsh32x16 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -11063,14 +10481,14 @@ func rewriteValuegeneric_OpLsh32x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x32 x (Const32 [c])) // result: (Lsh32x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -11085,8 +10503,6 @@ func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool { // match: (Lsh32x32 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -11097,18 +10513,17 @@ func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x64 (Const32 [c]) (Const64 [d])) // result: (Const32 [int64(int32(c) << uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11120,9 +10535,7 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { // match: (Lsh32x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -11134,8 +10547,6 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { // match: (Lsh32x64 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -11147,8 +10558,6 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11165,8 +10574,6 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { // result: (Lsh32x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh32x64 { break } @@ -11177,7 +10584,6 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11196,8 +10602,6 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Lsh32x64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh32Ux64 { break } @@ -11218,7 +10622,6 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11236,14 +10639,14 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh32x8 x (Const8 [c])) // result: (Lsh32x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ 
-11258,8 +10661,6 @@ func rewriteValuegeneric_OpLsh32x8_0(v *Value) bool { // match: (Lsh32x8 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -11270,14 +10671,14 @@ func rewriteValuegeneric_OpLsh32x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x16 x (Const16 [c])) // result: (Lsh64x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -11292,8 +10693,6 @@ func rewriteValuegeneric_OpLsh64x16_0(v *Value) bool { // match: (Lsh64x16 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -11304,14 +10703,14 @@ func rewriteValuegeneric_OpLsh64x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x32 x (Const32 [c])) // result: (Lsh64x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -11326,8 +10725,6 @@ func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool { // match: (Lsh64x32 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -11338,18 +10735,17 @@ func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c << uint64(d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11361,9 +10757,7 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { // match: (Lsh64x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -11375,8 +10769,6 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { // match: (Lsh64x64 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -11388,8 +10780,6 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (Const64 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11406,8 +10796,6 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { // result: (Lsh64x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } @@ -11418,7 +10806,6 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11437,8 +10824,6 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Lsh64x64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh64Ux64 { break } @@ -11459,7 +10844,6 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11477,14 +10861,14 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { return false 
} func rewriteValuegeneric_OpLsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh64x8 x (Const8 [c])) // result: (Lsh64x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -11499,8 +10883,6 @@ func rewriteValuegeneric_OpLsh64x8_0(v *Value) bool { // match: (Lsh64x8 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -11511,14 +10893,14 @@ func rewriteValuegeneric_OpLsh64x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x16 x (Const16 [c])) // result: (Lsh8x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -11533,8 +10915,6 @@ func rewriteValuegeneric_OpLsh8x16_0(v *Value) bool { // match: (Lsh8x16 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -11545,14 +10925,14 @@ func rewriteValuegeneric_OpLsh8x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x32 x (Const32 [c])) // result: (Lsh8x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -11567,8 +10947,6 @@ func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool { // match: (Lsh8x32 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -11579,18 +10957,17 @@ func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x64 (Const8 [c]) (Const64 [d])) // result: (Const8 [int64(int8(c) << uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11602,9 +10979,7 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { // match: (Lsh8x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -11616,8 +10991,6 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { // match: (Lsh8x64 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -11629,8 +11002,6 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (Const8 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11647,8 +11018,6 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { // result: (Lsh8x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh8x64 { break } @@ -11659,7 +11028,6 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11678,8 +11046,6 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Lsh8x64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if 
v_0.Op != OpRsh8Ux64 { break } @@ -11700,7 +11066,6 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -11718,14 +11083,14 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpLsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Lsh8x8 x (Const8 [c])) // result: (Lsh8x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -11740,8 +11105,6 @@ func rewriteValuegeneric_OpLsh8x8_0(v *Value) bool { // match: (Lsh8x8 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -11752,18 +11115,17 @@ func rewriteValuegeneric_OpLsh8x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod16 (Const16 [c]) (Const16 [d])) // cond: d != 0 // result: (Const16 [int64(int16(c % d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -11780,9 +11142,7 @@ func rewriteValuegeneric_OpMod16_0(v *Value) bool { // result: (And16 n (Const16 [(c&0xffff)-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst16 { break } @@ -11802,9 +11162,7 @@ func rewriteValuegeneric_OpMod16_0(v *Value) bool { // result: (Mod16 n (Const16 [-c])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst16 { break } @@ -11825,9 +11183,7 @@ func rewriteValuegeneric_OpMod16_0(v *Value) bool { // result: (Sub16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -11853,18 +11209,17 @@ func rewriteValuegeneric_OpMod16_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod16u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod16u (Const16 [c]) (Const16 [d])) // cond: d != 0 // result: (Const16 [int64(uint16(c) % uint16(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -11881,9 +11236,7 @@ func rewriteValuegeneric_OpMod16u_0(v *Value) bool { // result: (And16 n (Const16 [(c&0xffff)-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst16 { break } @@ -11903,9 +11256,7 @@ func rewriteValuegeneric_OpMod16u_0(v *Value) bool { // result: (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -11931,18 +11282,17 @@ func rewriteValuegeneric_OpMod16u_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod32 (Const32 [c]) (Const32 [d])) // cond: d != 0 // result: (Const32 [int64(int32(c % d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -11959,9 +11309,7 @@ func rewriteValuegeneric_OpMod32_0(v *Value) bool { // result: (And32 n (Const32 [(c&0xffffffff)-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := 
v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -11981,9 +11329,7 @@ func rewriteValuegeneric_OpMod32_0(v *Value) bool { // result: (Mod32 n (Const32 [-c])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -12004,9 +11350,7 @@ func rewriteValuegeneric_OpMod32_0(v *Value) bool { // result: (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -12032,18 +11376,17 @@ func rewriteValuegeneric_OpMod32_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod32u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod32u (Const32 [c]) (Const32 [d])) // cond: d != 0 // result: (Const32 [int64(uint32(c) % uint32(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -12060,9 +11403,7 @@ func rewriteValuegeneric_OpMod32u_0(v *Value) bool { // result: (And32 n (Const32 [(c&0xffffffff)-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst32 { break } @@ -12082,9 +11423,7 @@ func rewriteValuegeneric_OpMod32u_0(v *Value) bool { // result: (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -12110,18 +11449,17 @@ func rewriteValuegeneric_OpMod32u_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod64 (Const64 [c]) (Const64 [d])) // cond: d != 0 // result: (Const64 [c % d]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -12138,9 +11476,7 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { // result: (And64 n (Const64 [c-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 { break } @@ -12159,9 +11495,7 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { // cond: isNonNegative(n) // result: n for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 || !(isNonNegative(n)) { break } @@ -12175,9 +11509,7 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { // result: (Mod64 n (Const64 [-c])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 { break } @@ -12198,9 +11530,7 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { // result: (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -12226,18 +11556,17 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod64u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod64u (Const64 [c]) (Const64 [d])) // cond: d != 0 // result: (Const64 [int64(uint64(c) % uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -12254,9 +11583,7 @@ func rewriteValuegeneric_OpMod64u_0(v *Value) bool { // result: (And64 n (Const64 [c-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op 
!= OpConst64 { break } @@ -12275,9 +11602,7 @@ func rewriteValuegeneric_OpMod64u_0(v *Value) bool { // result: (And64 n (Const64 [1<<63-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != -1<<63 { break } @@ -12293,9 +11618,7 @@ func rewriteValuegeneric_OpMod64u_0(v *Value) bool { // result: (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -12321,18 +11644,17 @@ func rewriteValuegeneric_OpMod64u_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod8 (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(int8(c % d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -12349,9 +11671,7 @@ func rewriteValuegeneric_OpMod8_0(v *Value) bool { // result: (And8 n (Const8 [(c&0xff)-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -12371,9 +11691,7 @@ func rewriteValuegeneric_OpMod8_0(v *Value) bool { // result: (Mod8 n (Const8 [-c])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -12394,9 +11712,7 @@ func rewriteValuegeneric_OpMod8_0(v *Value) bool { // result: (Sub8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -12422,18 +11738,17 @@ func rewriteValuegeneric_OpMod8_0(v *Value) bool { return false } func rewriteValuegeneric_OpMod8u_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Mod8u (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(uint8(c) % uint8(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -12450,9 +11765,7 @@ func rewriteValuegeneric_OpMod8u_0(v *Value) bool { // result: (And8 n (Const8 [(c&0xff)-1])) for { t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] + n := v_0 if v_1.Op != OpConst8 { break } @@ -12472,9 +11785,7 @@ func rewriteValuegeneric_OpMod8u_0(v *Value) bool { // result: (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -12500,6 +11811,9 @@ func rewriteValuegeneric_OpMod8u_0(v *Value) bool { return false } func rewriteValuegeneric_OpMove_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) // cond: isSamePtr(src, dst2) @@ -12507,10 +11821,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[2] - dst1 := v.Args[0] - src := v.Args[1] - mem := v.Args[2] + dst1 := v_0 + src := v_1 + mem := v_2 if mem.Op != OpZero || mem.AuxInt != n || mem.Aux != t { break } @@ -12532,10 +11845,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[2] - dst1 := v.Args[0] - src := v.Args[1] - mem := v.Args[2] + dst1 := v_0 + src := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -12561,10 +11873,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - 
_ = v.Args[2] - dst1 := v.Args[0] - src1 := v.Args[1] - store := v.Args[2] + dst1 := v_0 + src1 := v_1 + store := v_2 if store.Op != OpStore { break } @@ -12593,10 +11904,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[2] - dst1 := v.Args[0] - src1 := v.Args[1] - move := v.Args[2] + dst1 := v_0 + src1 := v_1 + move := v_2 if move.Op != OpMove || move.AuxInt != n || move.Aux != t { break } @@ -12619,10 +11929,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[2] - dst1 := v.Args[0] - src1 := v.Args[1] - vardef := v.Args[2] + dst1 := v_0 + src1 := v_1 + vardef := v_2 if vardef.Op != OpVarDef { break } @@ -12653,10 +11962,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[2] - dst1 := v.Args[0] - src1 := v.Args[1] - zero := v.Args[2] + dst1 := v_0 + src1 := v_1 + zero := v_2 if zero.Op != OpZero || zero.AuxInt != n || zero.Aux != t { break } @@ -12679,10 +11987,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[2] - dst1 := v.Args[0] - src1 := v.Args[1] - vardef := v.Args[2] + dst1 := v_0 + src1 := v_1 + vardef := v_2 if vardef.Op != OpVarDef { break } @@ -12713,10 +12020,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -12773,10 +12079,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -12855,10 +12160,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -12956,6 +12260,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { return false } func rewriteValuegeneric_OpMove_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) @@ -12963,10 +12270,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13027,10 +12333,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13113,10 +12418,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13221,10 +12525,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { 
break } @@ -13269,10 +12572,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -13339,10 +12641,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -13431,10 +12732,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -13545,10 +12845,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13597,10 +12896,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13671,10 +12969,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13764,6 +13061,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { return false } func rewriteValuegeneric_OpMove_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _))))))) @@ -13772,10 +13072,9 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - p1 := v.Args[1] - mem := v.Args[2] + dst := v_0 + p1 := v_1 + mem := v_2 if mem.Op != OpVarDef { break } @@ -13890,10 +13189,9 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool { for { s := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - tmp1 := v.Args[1] - midmem := v.Args[2] + dst := v_0 + tmp1 := v_1 + midmem := v_2 if midmem.Op != OpMove || midmem.AuxInt != s { break } @@ -13918,10 +13216,9 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool { for { s := v.AuxInt t1 := v.Aux - _ = v.Args[2] - dst := v.Args[0] - tmp1 := v.Args[1] - midmem := v.Args[2] + dst := v_0 + tmp1 := v_1 + midmem := v_2 if midmem.Op != OpVarDef { break } @@ -13948,9 +13245,9 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool { // cond: isSamePtr(dst, src) // result: mem for { - mem := v.Args[2] - dst := v.Args[0] - src := v.Args[1] + dst := v_0 + src := v_1 + mem := v_2 if !(isSamePtr(dst, src)) { break } @@ -13962,19 +13259,18 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool { return false } func rewriteValuegeneric_OpMul16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c*d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { 
continue } @@ -13988,13 +13284,11 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { // match: (Mul16 (Const16 [1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -14005,13 +13299,11 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { // match: (Mul16 (Const16 [-1]) x) // result: (Neg16 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpNeg16) v.AddArg(x) return true @@ -14023,10 +13315,8 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { // result: (Lsh16x64 n (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst16 { continue } @@ -14049,10 +13339,8 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { // result: (Neg16 (Lsh16x64 n (Const64 [log2(-c)]))) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst16 { continue } @@ -14074,9 +13362,7 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { // match: (Mul16 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != 0 { continue } @@ -14089,26 +13375,24 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { // match: (Mul16 (Const16 [c]) (Mul16 (Const16 [d]) x)) // result: (Mul16 (Const16 [int64(int16(c*d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpMul16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c * d)) @@ -14122,19 +13406,18 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool { return false } func rewriteValuegeneric_OpMul32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c*d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -14148,13 +13431,11 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // match: (Mul32 (Const32 [1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := 
v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -14165,13 +13446,11 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // match: (Mul32 (Const32 [-1]) x) // result: (Neg32 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpNeg32) v.AddArg(x) return true @@ -14183,10 +13462,8 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // result: (Lsh32x64 n (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst32 { continue } @@ -14209,10 +13486,8 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // result: (Neg32 (Lsh32x64 n (Const64 [log2(-c)]))) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst32 { continue } @@ -14234,26 +13509,24 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // match: (Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd32 || v_1.Type != t { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c * d)) @@ -14272,9 +13545,7 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // match: (Mul32 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } @@ -14287,26 +13558,24 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { // match: (Mul32 (Const32 [c]) (Mul32 (Const32 [d]) x)) // result: (Mul32 (Const32 [int64(int32(c*d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpMul32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c * d)) @@ -14320,17 +13589,16 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool { return false } func rewriteValuegeneric_OpMul32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul32F (Const32F [c]) (Const32F [d])) // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; 
_i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32F { continue } @@ -14344,10 +13612,8 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { // match: (Mul32F x (Const32F [auxFrom64F(1)])) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom64F(1) { continue } @@ -14361,10 +13627,8 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { // match: (Mul32F x (Const32F [auxFrom32F(-1)])) // result: (Neg32F x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom32F(-1) { continue } @@ -14377,10 +13641,8 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { // match: (Mul32F x (Const32F [auxFrom32F(2)])) // result: (Add32F x x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpConst32F || v_1.AuxInt != auxFrom32F(2) { continue } @@ -14394,19 +13656,18 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpMul64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c*d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -14420,13 +13681,11 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // match: (Mul64 (Const64 [1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -14437,13 +13696,11 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // match: (Mul64 (Const64 [-1]) x) // result: (Neg64 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpNeg64) v.AddArg(x) return true @@ -14455,10 +13712,8 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // result: (Lsh64x64 n (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst64 { continue } @@ -14481,10 +13736,8 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // result: (Neg64 (Lsh64x64 n (Const64 [log2(-c)]))) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst64 { continue } @@ -14506,26 +13759,24 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // match: (Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) for { - _ = v.Args[1] - for 
_i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd64 || v_1.Type != t { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c * d @@ -14544,9 +13795,7 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // match: (Mul64 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } @@ -14559,26 +13808,24 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { // match: (Mul64 (Const64 [c]) (Mul64 (Const64 [d]) x)) // result: (Mul64 (Const64 [c*d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpMul64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c * d @@ -14592,17 +13839,16 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool { return false } func rewriteValuegeneric_OpMul64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Mul64F (Const64F [c]) (Const64F [d])) // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64F { continue } @@ -14616,10 +13862,8 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { // match: (Mul64F x (Const64F [auxFrom64F(1)])) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(1) { continue } @@ -14633,10 +13877,8 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { // match: (Mul64F x (Const64F [auxFrom64F(-1)])) // result: (Neg64F x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(-1) { continue } @@ -14649,10 +13891,8 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { // match: (Mul64F x (Const64F [auxFrom64F(2)])) // result: (Add64F x x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpConst64F || v_1.AuxInt != auxFrom64F(2) { continue } @@ -14666,19 +13906,18 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { return false } func 
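The Mul32/Mul32F/Mul64/Mul64F hunks above all make the same two changes: the generated function now loads its operands once at the top (v_1 := v.Args[1], then v_0 := v.Args[0]), and the commutative matcher stops indexing v.Args with _i0 and 1^_i0 on every pass; instead the loop's post statement rotates the two pregenerated values, so each rule body refers to the fixed names v_0 and v_1. A minimal sketch with plain ints (nothing below is the real *Value type or part of the patch) shows the two loop shapes visit the same argument orders:

	package main

	import "fmt"

	func main() {
		args := [2]int{10, 20}

		// Old shape: re-index args with _i0 and 1^_i0 on every iteration.
		for _i0 := 0; _i0 <= 1; _i0++ {
			fmt.Println("old:", args[_i0], args[1^_i0])
		}

		// New shape: load the operands once, then swap them in the post
		// statement so the body always uses the fixed names v_0 and v_1.
		v_0, v_1 := args[0], args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			fmt.Println("new:", v_0, v_1)
		}
		// Both loops print (10, 20) followed by (20, 10).
	}

Because the swap lives in the post statement, a continue inside the body still advances to the mirrored ordering, which is what the generated continue statements in these hunks rely on.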
rewriteValuegeneric_OpMul8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Mul8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c*d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -14692,13 +13931,11 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // match: (Mul8 (Const8 [1]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -14709,13 +13946,11 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // match: (Mul8 (Const8 [-1]) x) // result: (Neg8 x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != -1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpNeg8) v.AddArg(x) return true @@ -14727,10 +13962,8 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // result: (Lsh8x64 n (Const64 [log2(c)])) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst8 { continue } @@ -14753,10 +13986,8 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // result: (Neg8 (Lsh8x64 n (Const64 [log2(-c)]))) for { t := v.Type - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpConst8 { continue } @@ -14778,9 +14009,7 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // match: (Mul8 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != 0 { continue } @@ -14793,26 +14022,24 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { // match: (Mul8 (Const8 [c]) (Mul8 (Const8 [d]) x)) // result: (Mul8 (Const8 [int64(int8(c*d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpMul8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpMul8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c * d)) @@ -14826,11 +14053,11 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeg16_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Neg16 (Const16 [c])) // result: (Const16 [int64(-int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -14842,7 +14069,6 @@ func rewriteValuegeneric_OpNeg16_0(v *Value) bool { // match: (Neg16 (Sub16 x y)) // result: (Sub16 y x) for { - v_0 := v.Args[0] if v_0.Op != OpSub16 { break } @@ 
-14856,7 +14082,6 @@ func rewriteValuegeneric_OpNeg16_0(v *Value) bool { // match: (Neg16 (Neg16 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpNeg16 { break } @@ -14870,7 +14095,6 @@ func rewriteValuegeneric_OpNeg16_0(v *Value) bool { // result: (Add16 (Const16 [1]) x) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpCom16 { break } @@ -14885,11 +14109,11 @@ func rewriteValuegeneric_OpNeg16_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeg32_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Neg32 (Const32 [c])) // result: (Const32 [int64(-int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -14901,7 +14125,6 @@ func rewriteValuegeneric_OpNeg32_0(v *Value) bool { // match: (Neg32 (Sub32 x y)) // result: (Sub32 y x) for { - v_0 := v.Args[0] if v_0.Op != OpSub32 { break } @@ -14915,7 +14138,6 @@ func rewriteValuegeneric_OpNeg32_0(v *Value) bool { // match: (Neg32 (Neg32 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpNeg32 { break } @@ -14929,7 +14151,6 @@ func rewriteValuegeneric_OpNeg32_0(v *Value) bool { // result: (Add32 (Const32 [1]) x) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpCom32 { break } @@ -14944,11 +14165,11 @@ func rewriteValuegeneric_OpNeg32_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeg32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg32F (Const32F [c])) // cond: auxTo32F(c) != 0 // result: (Const32F [auxFrom32F(-auxTo32F(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32F { break } @@ -14963,11 +14184,11 @@ func rewriteValuegeneric_OpNeg32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeg64_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Neg64 (Const64 [c])) // result: (Const64 [-c]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } @@ -14979,7 +14200,6 @@ func rewriteValuegeneric_OpNeg64_0(v *Value) bool { // match: (Neg64 (Sub64 x y)) // result: (Sub64 y x) for { - v_0 := v.Args[0] if v_0.Op != OpSub64 { break } @@ -14993,7 +14213,6 @@ func rewriteValuegeneric_OpNeg64_0(v *Value) bool { // match: (Neg64 (Neg64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpNeg64 { break } @@ -15007,7 +14226,6 @@ func rewriteValuegeneric_OpNeg64_0(v *Value) bool { // result: (Add64 (Const64 [1]) x) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpCom64 { break } @@ -15022,11 +14240,11 @@ func rewriteValuegeneric_OpNeg64_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeg64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Neg64F (Const64F [c])) // cond: auxTo64F(c) != 0 // result: (Const64F [auxFrom64F(-auxTo64F(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64F { break } @@ -15041,11 +14259,11 @@ func rewriteValuegeneric_OpNeg64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeg8_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block // match: (Neg8 (Const8 [c])) // result: (Const8 [int64( -int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -15057,7 +14275,6 @@ func rewriteValuegeneric_OpNeg8_0(v *Value) bool { // match: (Neg8 (Sub8 x y)) // result: (Sub8 y x) for { - v_0 := v.Args[0] if v_0.Op != OpSub8 { break } @@ -15071,7 +14288,6 @@ func rewriteValuegeneric_OpNeg8_0(v *Value) bool { // match: (Neg8 (Neg8 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpNeg8 { break } @@ -15085,7 +14301,6 @@ func rewriteValuegeneric_OpNeg8_0(v *Value) bool { // result: (Add8 (Const8 [1]) x) for { t := v.Type - v_0 := v.Args[0] if v_0.Op != OpCom8 { break } @@ -15100,13 +14315,15 @@ func 
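For single-argument ops such as the Neg* functions above, the change is the simpler half of the pattern: the per-rule v_0 := v.Args[0] lines disappear and a single declaration moves into the function prologue, so every rule body starts directly at its v_0.Op test. A toy before/after, using a stand-in type rather than the compiler's *Value:

	package main

	import "fmt"

	// toyValue stands in for the SSA *Value for this sketch only.
	type toyValue struct {
		Op   string
		Args []*toyValue
	}

	// oldShape: every rule block re-read the argument itself.
	func oldShape(v *toyValue) bool {
		for {
			v_0 := v.Args[0]
			if v_0.Op != "Const64" {
				break
			}
			return true
		}
		return false
	}

	// newShape: the argument is loaded once and shared by every rule block.
	func newShape(v *toyValue) bool {
		v_0 := v.Args[0]
		for {
			if v_0.Op != "Const64" {
				break
			}
			return true
		}
		return false
	}

	func main() {
		v := &toyValue{Op: "Neg64", Args: []*toyValue{{Op: "Const64"}}}
		fmt.Println(oldShape(v), newShape(v)) // true true
	}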
rewriteValuegeneric_OpNeg8_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeq16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq16 x x) // result: (ConstBool [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -15116,26 +14333,24 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { // match: (Neq16 (Const16 [c]) (Add16 (Const16 [d]) x)) // result: (Neq16 (Const16 [int64(int16(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpNeq16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c - d)) @@ -15149,14 +14364,11 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { // match: (Neq16 (Const16 [c]) (Const16 [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -15171,10 +14383,8 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { // cond: k > 0 && k < 15 && kbar == 16 - k // result: (Neq16 (And16 n (Const16 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh16x64 { continue } @@ -15190,12 +14400,10 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -15244,15 +14452,13 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { // cond: s.Uses == 1 // result: (Neq16 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub16 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -15267,22 +14473,20 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { // cond: isPowerOfTwo(y) // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd16 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst16 || v_0_1.Type != t { 
continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -15304,13 +14508,15 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeq32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq32 x x) // result: (ConstBool [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -15320,26 +14526,24 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { // match: (Neq32 (Const32 [c]) (Add32 (Const32 [d]) x)) // result: (Neq32 (Const32 [int64(int32(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpNeq32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c - d)) @@ -15353,14 +14557,11 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { // match: (Neq32 (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -15375,10 +14576,8 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { // cond: k > 0 && k < 31 && kbar == 32 - k // result: (Neq32 (And32 n (Const32 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh32x64 { continue } @@ -15394,12 +14593,10 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -15448,15 +14645,13 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { // cond: s.Uses == 1 // result: (Neq32 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub32 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -15471,22 +14666,20 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { // cond: isPowerOfTwo(y) // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd32 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + 
v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst32 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -15508,17 +14701,16 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq32F (Const32F [c]) (Const32F [d])) // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32F { continue } @@ -15532,13 +14724,15 @@ func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeq64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq64 x x) // result: (ConstBool [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -15548,26 +14742,24 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { // match: (Neq64 (Const64 [c]) (Add64 (Const64 [d]) x)) // result: (Neq64 (Const64 [c-d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpNeq64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c - d @@ -15581,14 +14773,11 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { // match: (Neq64 (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -15603,10 +14792,8 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { // cond: k > 0 && k < 63 && kbar == 64 - k // result: (Neq64 (And64 n (Const64 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh64x64 { continue } @@ -15622,12 +14809,10 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -15676,15 +14861,13 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { // cond: s.Uses == 1 // result: (Neq64 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := 
v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub64 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -15699,22 +14882,20 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { // cond: isPowerOfTwo(y) // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd64 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst64 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -15736,17 +14917,16 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Neq64F (Const64F [c]) (Const64F [d])) // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64F { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64F { continue } @@ -15760,13 +14940,15 @@ func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeq8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Neq8 x x) // result: (ConstBool [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -15776,26 +14958,24 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { // match: (Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) // result: (Neq8 (Const8 [int64(int8(c-d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpAdd8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpNeq8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c - d)) @@ -15809,14 +14989,11 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { // match: (Neq8 (Const8 [c]) (Const8 [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -15831,10 +15008,8 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { // cond: k > 0 && k < 7 && kbar == 8 - k // result: (Neq8 (And8 n (Const8 [int64(1< [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - n := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 if v_1.Op != OpLsh8x64 { continue } @@ -15850,12 +15025,10 @@ func 
rewriteValuegeneric_OpNeq8_0(v *Value) bool { } t := v_1_0_0.Type _ = v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if n != v_1_0_0.Args[_i1] { - continue - } - v_1_0_0_1 := v_1_0_0.Args[1^_i1] - if v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { continue } _ = v_1_0_0_1.Args[1] @@ -15904,15 +15077,13 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { // cond: s.Uses == 1 // result: (Neq8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - s := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + s := v_0 if s.Op != OpSub8 { continue } y := s.Args[1] x := s.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(s.Uses == 1) { continue } @@ -15927,22 +15098,20 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { // cond: isPowerOfTwo(y) // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd8 { continue } t := v_0.Type _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst8 || v_0_1.Type != t { continue } y := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 || v_1.Type != t || v_1.AuxInt != y || !(isPowerOfTwo(y)) { continue } @@ -15964,17 +15133,16 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeqB_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqB (ConstBool [c]) (ConstBool [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstBool { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConstBool { continue } @@ -15988,13 +15156,11 @@ func rewriteValuegeneric_OpNeqB_0(v *Value) bool { // match: (NeqB (ConstBool [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstBool || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -16005,13 +15171,11 @@ func rewriteValuegeneric_OpNeqB_0(v *Value) bool { // match: (NeqB (ConstBool [1]) x) // result: (Not x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstBool || v_0.AuxInt != 1 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpNot) v.AddArg(x) return true @@ -16021,14 +15185,11 @@ func rewriteValuegeneric_OpNeqB_0(v *Value) bool { // match: (NeqB (Not x) (Not y)) // result: (NeqB x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpNot { continue } x := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpNot { continue } @@ -16043,13 +15204,15 @@ func rewriteValuegeneric_OpNeqB_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeqInter_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] 
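In the Neq* hunks above, only the outermost argument pair rides on the pregenerated v_0/v_1; every nested commutative operand (the inner Add16, Mul32, And64, and so on) still declares its own pair, v_1_0 and v_1_1, immediately before its swap loop, and checks that used to be split across two statements (if n != v_1_0_0.Args[_i1] followed by an Op test) fold into a single condition on the swapped names. Nesting the swap loops enumerates every combination of orderings. A toy sketch with plain strings, not the compiler's types:

	package main

	import "fmt"

	func main() {
		// Pretend we are matching (Neq n (Add d x)) where both Neq and Add
		// are commutative, so the matcher must try 2 x 2 argument orders.
		outer := [2]string{"n", "add"}
		inner := [2]string{"d", "x"}

		v_0, v_1 := outer[0], outer[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			// The nested operand gets its own declarations, mirroring the
			// generated v_1_0/v_1_1 lines, plus its own swap loop.
			v_1_0, v_1_1 := inner[0], inner[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				fmt.Println(v_0, v_1, "|", v_1_0, v_1_1)
			}
		}
		// Prints four lines: each outer ordering paired with each inner one.
	}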
b := v.Block typ := &b.Func.Config.Types // match: (NeqInter x y) // result: (NeqPtr (ITab x) (ITab y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNeqPtr) v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr) v0.AddArg(x) @@ -16061,11 +15224,13 @@ func rewriteValuegeneric_OpNeqInter_0(v *Value) bool { } } func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqPtr x x) // result: (ConstBool [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConstBool) @@ -16075,14 +15240,11 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (Addr {a} _) (Addr {b} _)) // result: (ConstBool [b2i(a != b)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } a := v_0.Aux - v_1 := v.Args[1^_i0] if v_1.Op != OpAddr { continue } @@ -16096,14 +15258,11 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) // result: (ConstBool [b2i(a != b || o != 0)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } a := v_0.Aux - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16122,9 +15281,7 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) // result: (ConstBool [b2i(a != b || o1 != o2)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -16134,7 +15291,6 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { continue } a := v_0_0.Aux - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16153,15 +15309,12 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) // result: (ConstBool [b2i(a != b)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } a := v_0.Aux _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpLocalAddr { continue } @@ -16176,15 +15329,12 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a != b || o != 0)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } a := v_0.Aux _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16204,9 +15354,7 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) // result: (ConstBool [b2i(a != b || o1 != o2)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -16217,7 +15365,6 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { } a := v_0_0.Aux _ = v_0_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16238,15 +15385,13 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 != 0)]) for { - _ = v.Args[1] - for 
_i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } o1 := v_0.AuxInt p1 := v_0.Args[0] - p2 := v.Args[1^_i0] + p2 := v_1 if !(isSamePtr(p1, p2)) { continue } @@ -16260,15 +15405,12 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // cond: isSamePtr(p1, p2) // result: (ConstBool [b2i(o1 != o2)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } o1 := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16286,14 +15428,11 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { // match: (NeqPtr (Const32 [c]) (Const32 [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -16307,17 +15446,16 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (NeqPtr (Const64 [c]) (Const64 [d])) // result: (ConstBool [b2i(c != d)]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -16331,14 +15469,11 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (LocalAddr _ _) (Addr _)) // result: (ConstBool [1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpAddr { continue } @@ -16351,9 +15486,7 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) // result: (ConstBool [1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -16362,7 +15495,6 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { continue } _ = v_0_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpAddr { continue } @@ -16375,14 +15507,11 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) // result: (ConstBool [1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } _ = v_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16399,9 +15528,7 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) // result: (ConstBool [1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { continue } @@ -16410,7 +15537,6 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { continue } _ = v_0_0.Args[1] - v_1 := v.Args[1^_i0] if v_1.Op != OpOffPtr { continue } @@ -16428,15 +15554,13 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // cond: isSamePtr(p1, p2) // result: (IsNonNil o1) for { - _ = v.Args[1] - for 
_i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddPtr { continue } o1 := v_0.Args[1] p1 := v_0.Args[0] - p2 := v.Args[1^_i0] + p2 := v_1 if !(isSamePtr(p1, p2)) { continue } @@ -16449,13 +15573,11 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (Const32 [0]) p) // result: (IsNonNil p) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } - p := v.Args[1^_i0] + p := v_1 v.reset(OpIsNonNil) v.AddArg(p) return true @@ -16465,13 +15587,11 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (Const64 [0]) p) // result: (IsNonNil p) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } - p := v.Args[1^_i0] + p := v_1 v.reset(OpIsNonNil) v.AddArg(p) return true @@ -16481,13 +15601,11 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { // match: (NeqPtr (ConstNil) p) // result: (IsNonNil p) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConstNil { continue } - p := v.Args[1^_i0] + p := v_1 v.reset(OpIsNonNil) v.AddArg(p) return true @@ -16497,13 +15615,15 @@ func rewriteValuegeneric_OpNeqPtr_10(v *Value) bool { return false } func rewriteValuegeneric_OpNeqSlice_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (NeqSlice x y) // result: (NeqPtr (SlicePtr x) (SlicePtr y)) for { - y := v.Args[1] - x := v.Args[0] + x := v_0 + y := v_1 v.reset(OpNeqPtr) v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr) v0.AddArg(x) @@ -16515,15 +15635,19 @@ func rewriteValuegeneric_OpNeqSlice_0(v *Value) bool { } } func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config fe := b.Func.fe // match: (NilCheck (GetG mem) mem) // result: mem for { - mem := v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpGetG || mem != v_0.Args[0] { + if v_0.Op != OpGetG { + break + } + mem := v_0.Args[0] + if mem != v_1 { break } v.reset(OpCopy) @@ -16535,8 +15659,6 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLoad { break } @@ -16565,8 +15687,6 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } @@ -16598,10 +15718,10 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { return false } func rewriteValuegeneric_OpNot_0(v *Value) bool { + v_0 := v.Args[0] // match: (Not (ConstBool [c])) // result: (ConstBool [1-c]) for { - v_0 := v.Args[0] if v_0.Op != OpConstBool { break } @@ -16613,7 +15733,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (Eq64 x y)) // result: (Neq64 x y) for { - v_0 := v.Args[0] if v_0.Op != OpEq64 { break } @@ -16627,7 +15746,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) 
bool { // match: (Not (Eq32 x y)) // result: (Neq32 x y) for { - v_0 := v.Args[0] if v_0.Op != OpEq32 { break } @@ -16641,7 +15759,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (Eq16 x y)) // result: (Neq16 x y) for { - v_0 := v.Args[0] if v_0.Op != OpEq16 { break } @@ -16655,7 +15772,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (Eq8 x y)) // result: (Neq8 x y) for { - v_0 := v.Args[0] if v_0.Op != OpEq8 { break } @@ -16669,7 +15785,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (EqB x y)) // result: (NeqB x y) for { - v_0 := v.Args[0] if v_0.Op != OpEqB { break } @@ -16683,7 +15798,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (EqPtr x y)) // result: (NeqPtr x y) for { - v_0 := v.Args[0] if v_0.Op != OpEqPtr { break } @@ -16697,7 +15811,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (Eq64F x y)) // result: (Neq64F x y) for { - v_0 := v.Args[0] if v_0.Op != OpEq64F { break } @@ -16711,7 +15824,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (Eq32F x y)) // result: (Neq32F x y) for { - v_0 := v.Args[0] if v_0.Op != OpEq32F { break } @@ -16725,7 +15837,6 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { // match: (Not (Neq64 x y)) // result: (Eq64 x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeq64 { break } @@ -16739,10 +15850,10 @@ func rewriteValuegeneric_OpNot_0(v *Value) bool { return false } func rewriteValuegeneric_OpNot_10(v *Value) bool { + v_0 := v.Args[0] // match: (Not (Neq32 x y)) // result: (Eq32 x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeq32 { break } @@ -16756,7 +15867,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Neq16 x y)) // result: (Eq16 x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeq16 { break } @@ -16770,7 +15880,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Neq8 x y)) // result: (Eq8 x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeq8 { break } @@ -16784,7 +15893,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (NeqB x y)) // result: (EqB x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeqB { break } @@ -16798,7 +15906,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (NeqPtr x y)) // result: (EqPtr x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeqPtr { break } @@ -16812,7 +15919,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Neq64F x y)) // result: (Eq64F x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeq64F { break } @@ -16826,7 +15932,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Neq32F x y)) // result: (Eq32F x y) for { - v_0 := v.Args[0] if v_0.Op != OpNeq32F { break } @@ -16840,7 +15945,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Greater64 x y)) // result: (Leq64 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater64 { break } @@ -16854,7 +15958,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Greater32 x y)) // result: (Leq32 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater32 { break } @@ -16868,7 +15971,6 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { // match: (Not (Greater16 x y)) // result: (Leq16 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater16 { break } @@ -16882,10 +15984,10 @@ func rewriteValuegeneric_OpNot_10(v *Value) bool { return false } func rewriteValuegeneric_OpNot_20(v *Value) bool { + v_0 := v.Args[0] // match: (Not (Greater8 x y)) // result: (Leq8 x y) for { - v_0 := v.Args[0] if v_0.Op != 
OpGreater8 { break } @@ -16899,7 +16001,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Greater64U x y)) // result: (Leq64U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater64U { break } @@ -16913,7 +16014,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Greater32U x y)) // result: (Leq32U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater32U { break } @@ -16927,7 +16027,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Greater16U x y)) // result: (Leq16U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater16U { break } @@ -16941,7 +16040,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Greater8U x y)) // result: (Leq8U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGreater8U { break } @@ -16955,7 +16053,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Geq64 x y)) // result: (Less64 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq64 { break } @@ -16969,7 +16066,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Geq32 x y)) // result: (Less32 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq32 { break } @@ -16983,7 +16079,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Geq16 x y)) // result: (Less16 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq16 { break } @@ -16997,7 +16092,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Geq8 x y)) // result: (Less8 x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq8 { break } @@ -17011,7 +16105,6 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { // match: (Not (Geq64U x y)) // result: (Less64U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq64U { break } @@ -17025,10 +16118,10 @@ func rewriteValuegeneric_OpNot_20(v *Value) bool { return false } func rewriteValuegeneric_OpNot_30(v *Value) bool { + v_0 := v.Args[0] // match: (Not (Geq32U x y)) // result: (Less32U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq32U { break } @@ -17042,7 +16135,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Geq16U x y)) // result: (Less16U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq16U { break } @@ -17056,7 +16148,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Geq8U x y)) // result: (Less8U x y) for { - v_0 := v.Args[0] if v_0.Op != OpGeq8U { break } @@ -17070,7 +16161,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less64 x y)) // result: (Geq64 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess64 { break } @@ -17084,7 +16174,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less32 x y)) // result: (Geq32 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess32 { break } @@ -17098,7 +16187,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less16 x y)) // result: (Geq16 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess16 { break } @@ -17112,7 +16200,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less8 x y)) // result: (Geq8 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess8 { break } @@ -17126,7 +16213,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less64U x y)) // result: (Geq64U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess64U { break } @@ -17140,7 +16226,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less32U x y)) // result: (Geq32U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess32U { break } @@ -17154,7 +16239,6 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { // match: (Not (Less16U x 
y)) // result: (Geq16U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess16U { break } @@ -17168,10 +16252,10 @@ func rewriteValuegeneric_OpNot_30(v *Value) bool { return false } func rewriteValuegeneric_OpNot_40(v *Value) bool { + v_0 := v.Args[0] // match: (Not (Less8U x y)) // result: (Geq8U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLess8U { break } @@ -17185,7 +16269,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq64 x y)) // result: (Greater64 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq64 { break } @@ -17199,7 +16282,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq32 x y)) // result: (Greater32 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq32 { break } @@ -17213,7 +16295,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq16 x y)) // result: (Greater16 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq16 { break } @@ -17227,7 +16308,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq8 x y)) // result: (Greater8 x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq8 { break } @@ -17241,7 +16321,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq64U x y)) // result: (Greater64U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq64U { break } @@ -17255,7 +16334,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq32U x y)) // result: (Greater32U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq32U { break } @@ -17269,7 +16347,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq16U x y)) // result: (Greater16U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq16U { break } @@ -17283,7 +16360,6 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { // match: (Not (Leq8U x y)) // result: (Greater8U x y) for { - v_0 := v.Args[0] if v_0.Op != OpLeq8U { break } @@ -17297,11 +16373,11 @@ func rewriteValuegeneric_OpNot_40(v *Value) bool { return false } func rewriteValuegeneric_OpOffPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (OffPtr (OffPtr p [b]) [a]) // result: (OffPtr p [a+b]) for { a := v.AuxInt - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } @@ -17319,7 +16395,7 @@ func rewriteValuegeneric_OpOffPtr_0(v *Value) bool { if v.AuxInt != 0 { break } - p := v.Args[0] + p := v_0 if !(v.Type.Compare(p.Type) == types.CMPeq) { break } @@ -17331,18 +16407,17 @@ func rewriteValuegeneric_OpOffPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpOr16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Or16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c|d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -17356,8 +16431,8 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // match: (Or16 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -17368,13 +16443,11 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // match: (Or16 (Const16 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17385,9 +16458,7 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // match: (Or16 
(Const16 [-1]) _) // result: (Const16 [-1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != -1 { continue } @@ -17400,19 +16471,19 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // match: (Or16 x (Or16 x y)) // result: (Or16 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpOr16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpOr16) v.AddArg(x) v.AddArg(y) @@ -17425,21 +16496,19 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // cond: ^(c1 | c2) == 0 // result: (Or16 (Const16 [c1]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd16 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst16 { continue } c2 := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -17462,21 +16531,21 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Or16 i (Or16 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOr16 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst16 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst16 && x.Op != OpConst16) { continue } @@ -17494,26 +16563,24 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { // match: (Or16 (Const16 [c]) (Or16 (Const16 [d]) x)) // result: (Or16 (Const16 [int64(int16(c|d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpOr16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c | d)) @@ -17527,18 +16594,17 @@ func rewriteValuegeneric_OpOr16_0(v *Value) bool { return false } func rewriteValuegeneric_OpOr32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Or32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c|d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { 
continue } @@ -17552,8 +16618,8 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // match: (Or32 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -17564,13 +16630,11 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // match: (Or32 (Const32 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17581,9 +16645,7 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // match: (Or32 (Const32 [-1]) _) // result: (Const32 [-1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != -1 { continue } @@ -17596,19 +16658,19 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // match: (Or32 x (Or32 x y)) // result: (Or32 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpOr32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpOr32) v.AddArg(x) v.AddArg(y) @@ -17621,21 +16683,19 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // cond: ^(c1 | c2) == 0 // result: (Or32 (Const32 [c1]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd32 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst32 { continue } c2 := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -17658,21 +16718,21 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Or32 i (Or32 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOr32 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst32 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst32 && x.Op != OpConst32) { continue } @@ -17690,26 +16750,24 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { // match: (Or32 (Const32 [c]) (Or32 (Const32 [d]) x)) // result: (Or32 (Const32 [int64(int32(c|d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpOr32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != 
OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c | d)) @@ -17723,18 +16781,17 @@ func rewriteValuegeneric_OpOr32_0(v *Value) bool { return false } func rewriteValuegeneric_OpOr64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Or64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c|d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -17748,8 +16805,8 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // match: (Or64 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -17760,13 +16817,11 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // match: (Or64 (Const64 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17777,9 +16832,7 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // match: (Or64 (Const64 [-1]) _) // result: (Const64 [-1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != -1 { continue } @@ -17792,19 +16845,19 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // match: (Or64 x (Or64 x y)) // result: (Or64 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpOr64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpOr64) v.AddArg(x) v.AddArg(y) @@ -17817,21 +16870,19 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // cond: ^(c1 | c2) == 0 // result: (Or64 (Const64 [c1]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd64 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst64 { continue } c2 := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -17854,21 +16905,21 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Or64 i (Or64 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOr64 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst64 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != 
OpConst64 && x.Op != OpConst64) { continue } @@ -17886,26 +16937,24 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) // result: (Or64 (Const64 [c|d]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpOr64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c | d @@ -17919,18 +16968,17 @@ func rewriteValuegeneric_OpOr64_0(v *Value) bool { return false } func rewriteValuegeneric_OpOr8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Or8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c|d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -17944,8 +16992,8 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // match: (Or8 x x) // result: x for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpCopy) @@ -17956,13 +17004,11 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // match: (Or8 (Const8 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -17973,9 +17019,7 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // match: (Or8 (Const8 [-1]) _) // result: (Const8 [-1]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != -1 { continue } @@ -17988,19 +17032,19 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // match: (Or8 x (Or8 x y)) // result: (Or8 x y) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpOr8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpOr8) v.AddArg(x) v.AddArg(y) @@ -18013,21 +17057,19 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // cond: ^(c1 | c2) == 0 // result: (Or8 (Const8 [c1]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAnd8 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - x := v_0.Args[_i1] - v_0_1 := v_0.Args[1^_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 if v_0_1.Op != OpConst8 { continue } c2 := v_0_1.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -18050,21 
+17092,21 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Or8 i (Or8 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOr8 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst8 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst8 && x.Op != OpConst8) { continue } @@ -18082,26 +17124,24 @@ func rewriteValuegeneric_OpOr8_0(v *Value) bool { // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) // result: (Or8 (Const8 [int64(int8(c|d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpOr8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c | d)) @@ -18186,6 +17226,8 @@ func rewriteValuegeneric_OpPhi_0(v *Value) bool { return false } func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types @@ -18194,8 +17236,8 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool { // result: (AddPtr ptr (Mul32 idx (Const32 [t.Elem().Size()]))) for { t := v.Type - idx := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + idx := v_1 if !(config.PtrSize == 4) { break } @@ -18214,8 +17256,8 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool { // result: (AddPtr ptr (Mul64 idx (Const64 [t.Elem().Size()]))) for { t := v.Type - idx := v.Args[1] - ptr := v.Args[0] + ptr := v_0 + idx := v_1 if !(config.PtrSize == 8) { break } @@ -18232,13 +17274,13 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool { return false } func rewriteValuegeneric_OpRotateLeft16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft16 x (Const16 [c])) // cond: c%16 == 0 // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -18254,13 +17296,13 @@ func rewriteValuegeneric_OpRotateLeft16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRotateLeft32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft32 x (Const32 [c])) // cond: c%32 == 0 // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -18276,13 +17318,13 @@ func rewriteValuegeneric_OpRotateLeft32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRotateLeft64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (RotateLeft64 x (Const64 [c])) // cond: c%64 == 0 // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -18298,13 +17340,13 @@ func rewriteValuegeneric_OpRotateLeft64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRotateLeft8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] // match: (RotateLeft8 x (Const8 [c])) // cond: c%8 == 0 // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -18320,10 +17362,11 @@ func rewriteValuegeneric_OpRotateLeft8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRound32F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round32F x:(Const32F)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpConst32F { break } @@ -18335,10 +17378,11 @@ func rewriteValuegeneric_OpRound32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpRound64F_0(v *Value) bool { + v_0 := v.Args[0] // match: (Round64F x:(Const64F)) // result: x for { - x := v.Args[0] + x := v_0 if x.Op != OpConst64F { break } @@ -18350,14 +17394,14 @@ func rewriteValuegeneric_OpRound64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux16 x (Const16 [c])) // result: (Rsh16Ux64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -18372,8 +17416,6 @@ func rewriteValuegeneric_OpRsh16Ux16_0(v *Value) bool { // match: (Rsh16Ux16 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18384,14 +17426,14 @@ func rewriteValuegeneric_OpRsh16Ux16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux32 x (Const32 [c])) // result: (Rsh16Ux64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -18406,8 +17448,6 @@ func rewriteValuegeneric_OpRsh16Ux32_0(v *Value) bool { // match: (Rsh16Ux32 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18418,18 +17458,17 @@ func rewriteValuegeneric_OpRsh16Ux32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 (Const16 [c]) (Const64 [d])) // result: (Const16 [int64(int16(uint16(c) >> uint64(d)))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18441,9 +17480,7 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // match: (Rsh16Ux64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -18455,8 +17492,6 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // match: (Rsh16Ux64 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18468,8 +17503,6 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // cond: uint64(c) >= 16 // result: (Const16 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18486,8 +17519,6 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // result: (Rsh16Ux64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh16Ux64 { break } @@ -18498,7 +17529,6 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] 
if v_1.Op != OpConst64 { break } @@ -18516,14 +17546,11 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 [15])) // result: (Rsh16Ux64 x (Const64 [15])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh16x64 { break } _ = v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18542,8 +17569,6 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh16Ux64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh16x64 { break } @@ -18564,7 +17589,6 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18582,19 +17606,13 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) // result: (ZeroExt8to16 (Trunc16to8 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh16x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 8 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 8 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 8 || v_1.Op != OpConst64 || v_1.AuxInt != 8 { break } v.reset(OpZeroExt8to16) @@ -18606,14 +17624,14 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16Ux8 x (Const8 [c])) // result: (Rsh16Ux64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -18628,8 +17646,6 @@ func rewriteValuegeneric_OpRsh16Ux8_0(v *Value) bool { // match: (Rsh16Ux8 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18640,14 +17656,14 @@ func rewriteValuegeneric_OpRsh16Ux8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x16 x (Const16 [c])) // result: (Rsh16x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -18662,8 +17678,6 @@ func rewriteValuegeneric_OpRsh16x16_0(v *Value) bool { // match: (Rsh16x16 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18674,14 +17688,14 @@ func rewriteValuegeneric_OpRsh16x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x32 x (Const32 [c])) // result: (Rsh16x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -18696,8 +17710,6 @@ func rewriteValuegeneric_OpRsh16x32_0(v *Value) bool { // match: (Rsh16x32 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18708,18 +17720,17 @@ func rewriteValuegeneric_OpRsh16x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 
(Const16 [c]) (Const64 [d])) // result: (Const16 [int64(int16(c) >> uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18731,9 +17742,7 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { // match: (Rsh16x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -18745,8 +17754,6 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { // match: (Rsh16x64 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18759,8 +17766,6 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { // result: (Rsh16x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh16x64 { break } @@ -18771,7 +17776,6 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18789,19 +17793,13 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) // result: (SignExt8to16 (Trunc16to8 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh16x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 8 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 8 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 8 || v_1.Op != OpConst64 || v_1.AuxInt != 8 { break } v.reset(OpSignExt8to16) @@ -18813,14 +17811,14 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh16x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh16x8 x (Const8 [c])) // result: (Rsh16x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -18835,8 +17833,6 @@ func rewriteValuegeneric_OpRsh16x8_0(v *Value) bool { // match: (Rsh16x8 (Const16 [0]) _) // result: (Const16 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 || v_0.AuxInt != 0 { break } @@ -18847,14 +17843,14 @@ func rewriteValuegeneric_OpRsh16x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux16 x (Const16 [c])) // result: (Rsh32Ux64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -18869,8 +17865,6 @@ func rewriteValuegeneric_OpRsh32Ux16_0(v *Value) bool { // match: (Rsh32Ux16 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -18881,14 +17875,14 @@ func rewriteValuegeneric_OpRsh32Ux16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux32 x (Const32 [c])) // result: (Rsh32Ux64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -18903,8 +17897,6 @@ func rewriteValuegeneric_OpRsh32Ux32_0(v *Value) bool { // match: (Rsh32Ux32 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break 
} @@ -18915,18 +17907,17 @@ func rewriteValuegeneric_OpRsh32Ux32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 (Const32 [c]) (Const64 [d])) // result: (Const32 [int64(int32(uint32(c) >> uint64(d)))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18938,9 +17929,7 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -18952,8 +17941,6 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -18965,8 +17952,6 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // cond: uint64(c) >= 32 // result: (Const32 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -18983,8 +17968,6 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // result: (Rsh32Ux64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh32Ux64 { break } @@ -18995,7 +17978,6 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19013,14 +17995,11 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 [31])) // result: (Rsh32Ux64 x (Const64 [31])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh32x64 { break } _ = v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19039,8 +18018,6 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh32Ux64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh32x64 { break } @@ -19061,7 +18038,6 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19079,19 +18055,13 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) // result: (ZeroExt8to32 (Trunc32to8 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh32x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 24 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 24 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 24 || v_1.Op != OpConst64 || v_1.AuxInt != 24 { break } v.reset(OpZeroExt8to32) @@ -19103,19 +18073,13 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) // result: (ZeroExt16to32 (Trunc32to16 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh32x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 16 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 16 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 16 || v_1.Op != OpConst64 || v_1.AuxInt != 16 { break } v.reset(OpZeroExt16to32) @@ -19127,14 +18091,14 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { 
return false } func rewriteValuegeneric_OpRsh32Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32Ux8 x (Const8 [c])) // result: (Rsh32Ux64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -19149,8 +18113,6 @@ func rewriteValuegeneric_OpRsh32Ux8_0(v *Value) bool { // match: (Rsh32Ux8 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -19161,14 +18123,14 @@ func rewriteValuegeneric_OpRsh32Ux8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x16 x (Const16 [c])) // result: (Rsh32x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -19183,8 +18145,6 @@ func rewriteValuegeneric_OpRsh32x16_0(v *Value) bool { // match: (Rsh32x16 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -19195,14 +18155,14 @@ func rewriteValuegeneric_OpRsh32x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x32 x (Const32 [c])) // result: (Rsh32x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -19217,8 +18177,6 @@ func rewriteValuegeneric_OpRsh32x32_0(v *Value) bool { // match: (Rsh32x32 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -19229,18 +18187,17 @@ func rewriteValuegeneric_OpRsh32x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 (Const32 [c]) (Const64 [d])) // result: (Const32 [int64(int32(c) >> uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19252,9 +18209,7 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -19266,8 +18221,6 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -19280,8 +18233,6 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { // result: (Rsh32x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh32x64 { break } @@ -19292,7 +18243,6 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19310,19 +18260,13 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) // result: (SignExt8to32 (Trunc32to8 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh32x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 24 { - break - } - v_1 := v.Args[1] 
- if v_1.Op != OpConst64 || v_1.AuxInt != 24 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 24 || v_1.Op != OpConst64 || v_1.AuxInt != 24 { break } v.reset(OpSignExt8to32) @@ -19334,19 +18278,13 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { // match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) // result: (SignExt16to32 (Trunc32to16 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh32x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 16 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 16 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 16 || v_1.Op != OpConst64 || v_1.AuxInt != 16 { break } v.reset(OpSignExt16to32) @@ -19358,14 +18296,14 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh32x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh32x8 x (Const8 [c])) // result: (Rsh32x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -19380,8 +18318,6 @@ func rewriteValuegeneric_OpRsh32x8_0(v *Value) bool { // match: (Rsh32x8 (Const32 [0]) _) // result: (Const32 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -19392,14 +18328,14 @@ func rewriteValuegeneric_OpRsh32x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux16 x (Const16 [c])) // result: (Rsh64Ux64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -19414,8 +18350,6 @@ func rewriteValuegeneric_OpRsh64Ux16_0(v *Value) bool { // match: (Rsh64Ux16 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19426,14 +18360,14 @@ func rewriteValuegeneric_OpRsh64Ux16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux32 x (Const32 [c])) // result: (Rsh64Ux64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -19448,8 +18382,6 @@ func rewriteValuegeneric_OpRsh64Ux32_0(v *Value) bool { // match: (Rsh64Ux32 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19460,18 +18392,17 @@ func rewriteValuegeneric_OpRsh64Ux32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux64 (Const64 [c]) (Const64 [d])) // result: (Const64 [int64(uint64(c) >> uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19483,9 +18414,7 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -19497,8 +18426,6 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = 
v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19510,8 +18437,6 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // cond: uint64(c) >= 64 // result: (Const64 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19528,8 +18453,6 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // result: (Rsh64Ux64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh64Ux64 { break } @@ -19540,7 +18463,6 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19558,14 +18480,11 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 [63])) // result: (Rsh64Ux64 x (Const64 [63])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19584,8 +18503,6 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh64Ux64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } @@ -19606,7 +18523,6 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19624,19 +18540,13 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) // result: (ZeroExt8to64 (Trunc64to8 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 56 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 56 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 56 || v_1.Op != OpConst64 || v_1.AuxInt != 56 { break } v.reset(OpZeroExt8to64) @@ -19648,19 +18558,13 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) // result: (ZeroExt16to64 (Trunc64to16 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 48 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 48 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 48 || v_1.Op != OpConst64 || v_1.AuxInt != 48 { break } v.reset(OpZeroExt16to64) @@ -19672,19 +18576,13 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) // result: (ZeroExt32to64 (Trunc64to32 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 32 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 32 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 32 || v_1.Op != OpConst64 || v_1.AuxInt != 32 { break } v.reset(OpZeroExt32to64) @@ -19696,14 +18594,14 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux8 x (Const8 [c])) // result: (Rsh64Ux64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { 
break } @@ -19718,8 +18616,6 @@ func rewriteValuegeneric_OpRsh64Ux8_0(v *Value) bool { // match: (Rsh64Ux8 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19730,14 +18626,14 @@ func rewriteValuegeneric_OpRsh64Ux8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x16 x (Const16 [c])) // result: (Rsh64x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -19752,8 +18648,6 @@ func rewriteValuegeneric_OpRsh64x16_0(v *Value) bool { // match: (Rsh64x16 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19764,14 +18658,14 @@ func rewriteValuegeneric_OpRsh64x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x32 x (Const32 [c])) // result: (Rsh64x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -19786,8 +18680,6 @@ func rewriteValuegeneric_OpRsh64x32_0(v *Value) bool { // match: (Rsh64x32 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19798,18 +18690,17 @@ func rewriteValuegeneric_OpRsh64x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c >> uint64(d)]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19821,9 +18712,7 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -19835,8 +18724,6 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19849,8 +18736,6 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { // result: (Rsh64x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh64x64 { break } @@ -19861,7 +18746,6 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -19879,19 +18763,13 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) // result: (SignExt8to64 (Trunc64to8 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 56 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 56 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 56 || v_1.Op != OpConst64 || v_1.AuxInt != 56 { break } v.reset(OpSignExt8to64) @@ -19903,19 +18781,13 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) // result: 
(SignExt16to64 (Trunc64to16 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 48 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 48 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 48 || v_1.Op != OpConst64 || v_1.AuxInt != 48 { break } v.reset(OpSignExt16to64) @@ -19927,19 +18799,13 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { // match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) // result: (SignExt32to64 (Trunc64to32 x)) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 32 { - break - } - v_1 := v.Args[1] - if v_1.Op != OpConst64 || v_1.AuxInt != 32 { + if v_0_1.Op != OpConst64 || v_0_1.AuxInt != 32 || v_1.Op != OpConst64 || v_1.AuxInt != 32 { break } v.reset(OpSignExt32to64) @@ -19951,14 +18817,14 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh64x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh64x8 x (Const8 [c])) // result: (Rsh64x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -19973,8 +18839,6 @@ func rewriteValuegeneric_OpRsh64x8_0(v *Value) bool { // match: (Rsh64x8 (Const64 [0]) _) // result: (Const64 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -19985,14 +18849,14 @@ func rewriteValuegeneric_OpRsh64x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8Ux16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux16 x (Const16 [c])) // result: (Rsh8Ux64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -20007,8 +18871,6 @@ func rewriteValuegeneric_OpRsh8Ux16_0(v *Value) bool { // match: (Rsh8Ux16 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20019,14 +18881,14 @@ func rewriteValuegeneric_OpRsh8Ux16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8Ux32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux32 x (Const32 [c])) // result: (Rsh8Ux64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -20041,8 +18903,6 @@ func rewriteValuegeneric_OpRsh8Ux32_0(v *Value) bool { // match: (Rsh8Ux32 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20053,18 +18913,17 @@ func rewriteValuegeneric_OpRsh8Ux32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d])) // result: (Const8 [int64(int8(uint8(c) >> uint64(d)))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20076,9 +18935,7 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { // match: (Rsh8Ux64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := 
v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -20090,8 +18947,6 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { // match: (Rsh8Ux64 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20103,8 +18958,6 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { // cond: uint64(c) >= 8 // result: (Const8 [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20121,8 +18974,6 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { // result: (Rsh8Ux64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh8Ux64 { break } @@ -20133,7 +18984,6 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20151,14 +19001,11 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7] )) // result: (Rsh8Ux64 x (Const64 [7] )) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh8x64 { break } _ = v_0.Args[1] x := v_0.Args[0] - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20177,8 +19024,6 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh8Ux64 x (Const64 [c1-c2+c3])) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLsh8x64 { break } @@ -20199,7 +19044,6 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { break } c2 := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20217,14 +19061,14 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8Ux8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8Ux8 x (Const8 [c])) // result: (Rsh8Ux64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -20239,8 +19083,6 @@ func rewriteValuegeneric_OpRsh8Ux8_0(v *Value) bool { // match: (Rsh8Ux8 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20251,14 +19093,14 @@ func rewriteValuegeneric_OpRsh8Ux8_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8x16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x16 x (Const16 [c])) // result: (Rsh8x64 x (Const64 [int64(uint16(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -20273,8 +19115,6 @@ func rewriteValuegeneric_OpRsh8x16_0(v *Value) bool { // match: (Rsh8x16 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20285,14 +19125,14 @@ func rewriteValuegeneric_OpRsh8x16_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8x32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x32 x (Const32 [c])) // result: (Rsh8x64 x (Const64 [int64(uint32(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -20307,8 +19147,6 @@ func rewriteValuegeneric_OpRsh8x32_0(v *Value) bool { // match: (Rsh8x32 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20319,17 +19157,16 
@@ func rewriteValuegeneric_OpRsh8x32_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8x64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x64 (Const8 [c]) (Const64 [d])) // result: (Const8 [int64(int8(c) >> uint64(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20341,9 +19178,7 @@ func rewriteValuegeneric_OpRsh8x64_0(v *Value) bool { // match: (Rsh8x64 x (Const64 [0])) // result: x for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 || v_1.AuxInt != 0 { break } @@ -20355,8 +19190,6 @@ func rewriteValuegeneric_OpRsh8x64_0(v *Value) bool { // match: (Rsh8x64 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20369,8 +19202,6 @@ func rewriteValuegeneric_OpRsh8x64_0(v *Value) bool { // result: (Rsh8x64 x (Const64 [c+d])) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpRsh8x64 { break } @@ -20381,7 +19212,6 @@ func rewriteValuegeneric_OpRsh8x64_0(v *Value) bool { break } c := v_0_1.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -20399,14 +19229,14 @@ func rewriteValuegeneric_OpRsh8x64_0(v *Value) bool { return false } func rewriteValuegeneric_OpRsh8x8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Rsh8x8 x (Const8 [c])) // result: (Rsh8x64 x (Const64 [int64(uint8(c))])) for { t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -20421,8 +19251,6 @@ func rewriteValuegeneric_OpRsh8x8_0(v *Value) bool { // match: (Rsh8x8 (Const8 [0]) _) // result: (Const8 [0]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 || v_0.AuxInt != 0 { break } @@ -20433,10 +19261,10 @@ func rewriteValuegeneric_OpRsh8x8_0(v *Value) bool { return false } func rewriteValuegeneric_OpSelect0_0(v *Value) bool { + v_0 := v.Args[0] // match: (Select0 (Div128u (Const64 [0]) lo y)) // result: (Div64u lo y) for { - v_0 := v.Args[0] if v_0.Op != OpDiv128u { break } @@ -20454,10 +19282,10 @@ func rewriteValuegeneric_OpSelect0_0(v *Value) bool { return false } func rewriteValuegeneric_OpSelect1_0(v *Value) bool { + v_0 := v.Args[0] // match: (Select1 (Div128u (Const64 [0]) lo y)) // result: (Mod64u lo y) for { - v_0 := v.Args[0] if v_0.Op != OpDiv128u { break } @@ -20475,10 +19303,10 @@ func rewriteValuegeneric_OpSelect1_0(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to32 (Const16 [c])) // result: (Const32 [int64( int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -20491,7 +19319,6 @@ func rewriteValuegeneric_OpSignExt16to32_0(v *Value) bool { // cond: s >= 16 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc32to16 { break } @@ -20516,10 +19343,10 @@ func rewriteValuegeneric_OpSignExt16to32_0(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt16to64 (Const16 [c])) // result: (Const64 [int64( int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -20532,7 +19359,6 @@ func rewriteValuegeneric_OpSignExt16to64_0(v *Value) bool { // cond: s >= 48 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc64to16 { break } @@ -20557,10 +19383,10 @@ func rewriteValuegeneric_OpSignExt16to64_0(v *Value) bool { return false 
} func rewriteValuegeneric_OpSignExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt32to64 (Const32 [c])) // result: (Const64 [int64( int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -20573,7 +19399,6 @@ func rewriteValuegeneric_OpSignExt32to64_0(v *Value) bool { // cond: s >= 32 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc64to32 { break } @@ -20598,10 +19423,10 @@ func rewriteValuegeneric_OpSignExt32to64_0(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to16 (Const8 [c])) // result: (Const16 [int64( int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -20614,7 +19439,6 @@ func rewriteValuegeneric_OpSignExt8to16_0(v *Value) bool { // cond: s >= 8 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc16to8 { break } @@ -20639,10 +19463,10 @@ func rewriteValuegeneric_OpSignExt8to16_0(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to32 (Const8 [c])) // result: (Const32 [int64( int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -20655,7 +19479,6 @@ func rewriteValuegeneric_OpSignExt8to32_0(v *Value) bool { // cond: s >= 24 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc32to8 { break } @@ -20680,10 +19503,10 @@ func rewriteValuegeneric_OpSignExt8to32_0(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (SignExt8to64 (Const8 [c])) // result: (Const64 [int64( int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -20696,7 +19519,6 @@ func rewriteValuegeneric_OpSignExt8to64_0(v *Value) bool { // cond: s >= 56 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc64to8 { break } @@ -20721,10 +19543,10 @@ func rewriteValuegeneric_OpSignExt8to64_0(v *Value) bool { return false } func rewriteValuegeneric_OpSliceCap_0(v *Value) bool { + v_0 := v.Args[0] // match: (SliceCap (SliceMake _ _ (Const64 [c]))) // result: (Const64 [c]) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20743,7 +19565,6 @@ func rewriteValuegeneric_OpSliceCap_0(v *Value) bool { // match: (SliceCap (SliceMake _ _ (Const32 [c]))) // result: (Const32 [c]) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20762,7 +19583,6 @@ func rewriteValuegeneric_OpSliceCap_0(v *Value) bool { // match: (SliceCap (SliceMake _ _ (SliceCap x))) // result: (SliceCap x) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20779,7 +19599,6 @@ func rewriteValuegeneric_OpSliceCap_0(v *Value) bool { // match: (SliceCap (SliceMake _ _ (SliceLen x))) // result: (SliceLen x) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20796,10 +19615,10 @@ func rewriteValuegeneric_OpSliceCap_0(v *Value) bool { return false } func rewriteValuegeneric_OpSliceLen_0(v *Value) bool { + v_0 := v.Args[0] // match: (SliceLen (SliceMake _ (Const64 [c]) _)) // result: (Const64 [c]) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20818,7 +19637,6 @@ func rewriteValuegeneric_OpSliceLen_0(v *Value) bool { // match: (SliceLen (SliceMake _ (Const32 [c]) _)) // result: (Const32 [c]) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20837,7 +19655,6 @@ func rewriteValuegeneric_OpSliceLen_0(v *Value) bool { // match: (SliceLen (SliceMake _ (SliceLen x) _)) // result: (SliceLen x) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20854,10 
+19671,10 @@ func rewriteValuegeneric_OpSliceLen_0(v *Value) bool { return false } func rewriteValuegeneric_OpSlicePtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (SlicePtr (SliceMake (SlicePtr x) _ _)) // result: (SlicePtr x) for { - v_0 := v.Args[0] if v_0.Op != OpSliceMake { break } @@ -20874,11 +19691,11 @@ func rewriteValuegeneric_OpSlicePtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpSlicemask_0(v *Value) bool { + v_0 := v.Args[0] // match: (Slicemask (Const32 [x])) // cond: x > 0 // result: (Const32 [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -20893,7 +19710,6 @@ func rewriteValuegeneric_OpSlicemask_0(v *Value) bool { // match: (Slicemask (Const32 [0])) // result: (Const32 [0]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 || v_0.AuxInt != 0 { break } @@ -20905,7 +19721,6 @@ func rewriteValuegeneric_OpSlicemask_0(v *Value) bool { // cond: x > 0 // result: (Const64 [-1]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } @@ -20920,7 +19735,6 @@ func rewriteValuegeneric_OpSlicemask_0(v *Value) bool { // match: (Slicemask (Const64 [0])) // result: (Const64 [0]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 || v_0.AuxInt != 0 { break } @@ -20931,10 +19745,10 @@ func rewriteValuegeneric_OpSlicemask_0(v *Value) bool { return false } func rewriteValuegeneric_OpSqrt_0(v *Value) bool { + v_0 := v.Args[0] // match: (Sqrt (Const64F [c])) // result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64F { break } @@ -20946,6 +19760,7 @@ func rewriteValuegeneric_OpSqrt_0(v *Value) bool { return false } func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) @@ -20953,7 +19768,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { // result: (Move {t.(*types.Type).Elem()} [sz] dst src mem) for { sym := v.Aux - s1 := v.Args[0] + s1 := v_0 if s1.Op != OpStore { break } @@ -20992,7 +19807,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { // result: (Move {t.(*types.Type).Elem()} [sz] dst src mem) for { sym := v.Aux - s1 := v.Args[0] + s1 := v_0 if s1.Op != OpStore { break } @@ -21031,7 +19846,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { // result: x for { sym := v.Aux - x := v.Args[0] + x := v_0 if !(needRaceCleanup(sym, v)) { break } @@ -21043,22 +19858,23 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { return false } func rewriteValuegeneric_OpStore_0(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Store {t1} p1 (Load p2 mem) mem) // cond: isSamePtr(p1, p2) && t2.Size() == sizeof(t1) // result: mem for { t1 := v.Aux - mem := v.Args[2] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpLoad { break } t2 := v_1.Type - _ = v_1.Args[1] + mem := v_1.Args[1] p2 := v_1.Args[0] - if mem != v_1.Args[1] || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1)) { + if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1)) { break } v.reset(OpCopy) @@ -21071,16 +19887,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // result: mem for { t1 := v.Aux - _ = v.Args[2] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpLoad { break } t2 := v_1.Type oldmem := v_1.Args[1] p2 := v_1.Args[0] - mem := v.Args[2] + mem := v_2 if mem.Op != OpStore { break } @@ -21100,16 +19914,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // 
result: mem for { t1 := v.Aux - _ = v.Args[2] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpLoad { break } t2 := v_1.Type oldmem := v_1.Args[1] p2 := v_1.Args[0] - mem := v.Args[2] + mem := v_2 if mem.Op != OpStore { break } @@ -21136,16 +19948,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // result: mem for { t1 := v.Aux - _ = v.Args[2] - p1 := v.Args[0] - v_1 := v.Args[1] + p1 := v_0 if v_1.Op != OpLoad { break } t2 := v_1.Type oldmem := v_1.Args[1] p2 := v_1.Args[0] - mem := v.Args[2] + mem := v_2 if mem.Op != OpStore { break } @@ -21179,15 +19989,13 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // result: mem for { t := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } o := v_0.AuxInt p1 := v_0.Args[0] - x := v.Args[1] - mem := v.Args[2] + x := v_1 + mem := v_2 if mem.Op != OpZero { break } @@ -21207,15 +20015,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // result: mem for { t1 := v.Aux - _ = v.Args[2] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - x := v.Args[1] - mem := v.Args[2] + x := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -21242,15 +20049,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // result: mem for { t1 := v.Aux - _ = v.Args[2] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - x := v.Args[1] - mem := v.Args[2] + x := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -21284,15 +20090,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // result: mem for { t1 := v.Aux - _ = v.Args[2] - op := v.Args[0] + op := v_0 if op.Op != OpOffPtr { break } o1 := op.AuxInt p1 := op.Args[0] - x := v.Args[1] - mem := v.Args[2] + x := v_1 + mem := v_2 if mem.Op != OpStore { break } @@ -21331,11 +20136,10 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // match: (Store _ (StructMake0) mem) // result: mem for { - mem := v.Args[2] - v_1 := v.Args[1] if v_1.Op != OpStructMake0 { break } + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -21344,14 +20148,13 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { // match: (Store dst (StructMake1 f0) mem) // result: (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpStructMake1 { break } t := v_1.Type f0 := v_1.Args[0] + mem := v_2 v.reset(OpStore) v.Aux = t.FieldType(0) v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) @@ -21365,21 +20168,23 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { return false } func rewriteValuegeneric_OpStore_10(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config fe := b.Func.fe // match: (Store dst (StructMake2 f0 f1) mem) // result: (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpStructMake2 { break } t := v_1.Type f1 := v_1.Args[1] f0 := v_1.Args[0] + mem := v_2 v.reset(OpStore) v.Aux = t.FieldType(1) v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) @@ -21401,9 +20206,7 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // match: (Store dst (StructMake3 f0 f1 f2) mem) // result: (Store {t.FieldType(2)} (OffPtr [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem))) for { - mem := v.Args[2] - dst := v.Args[0] - 
v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpStructMake3 { break } @@ -21411,6 +20214,7 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { f2 := v_1.Args[2] f0 := v_1.Args[0] f1 := v_1.Args[1] + mem := v_2 v.reset(OpStore) v.Aux = t.FieldType(2) v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) @@ -21440,9 +20244,7 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // match: (Store dst (StructMake4 f0 f1 f2 f3) mem) // result: (Store {t.FieldType(3)} (OffPtr [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)))) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpStructMake4 { break } @@ -21451,6 +20253,7 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { f0 := v_1.Args[0] f1 := v_1.Args[1] f2 := v_1.Args[2] + mem := v_2 v.reset(OpStore) v.Aux = t.FieldType(3) v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) @@ -21490,15 +20293,13 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // result: (Move {t} [sizeof(t)] dst src mem) for { t := v.Aux - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpLoad { break } - _ = v_1.Args[1] + mem := v_1.Args[1] src := v_1.Args[0] - if mem != v_1.Args[1] || !(!fe.CanSSA(t.(*types.Type))) { + if mem != v_2 || !(!fe.CanSSA(t.(*types.Type))) { break } v.reset(OpMove) @@ -21514,15 +20315,12 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // result: (Move {t} [sizeof(t)] dst src (VarDef {x} mem)) for { t := v.Aux - _ = v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpLoad { break } mem := v_1.Args[1] src := v_1.Args[0] - v_2 := v.Args[2] if v_2.Op != OpVarDef { break } @@ -21544,11 +20342,10 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // match: (Store _ (ArrayMake0) mem) // result: mem for { - mem := v.Args[2] - v_1 := v.Args[1] if v_1.Op != OpArrayMake0 { break } + mem := v_2 v.reset(OpCopy) v.Type = mem.Type v.AddArg(mem) @@ -21557,13 +20354,12 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // match: (Store dst (ArrayMake1 e) mem) // result: (Store {e.Type} dst e mem) for { - mem := v.Args[2] - dst := v.Args[0] - v_1 := v.Args[1] + dst := v_0 if v_1.Op != OpArrayMake1 { break } e := v_1.Args[0] + mem := v_2 v.reset(OpStore) v.Aux = e.Type v.AddArg(dst) @@ -21575,23 +20371,21 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // result: mem for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpLoad { break } - _ = v_0.Args[1] + mem := v_0.Args[1] v_0_0 := v_0.Args[0] if v_0_0.Op != OpOffPtr { break } c := v_0_0.AuxInt v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSP || mem != v_0.Args[1] { + if v_0_0_0.Op != OpSP { break } - x := v.Args[1] - if !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { + x := v_1 + if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } v.reset(OpCopy) @@ -21603,8 +20397,6 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // result: mem 
for { - mem := v.Args[2] - v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } @@ -21612,18 +20404,18 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { if v_0_0.Op != OpLoad { break } - _ = v_0_0.Args[1] + mem := v_0_0.Args[1] v_0_0_0 := v_0_0.Args[0] if v_0_0_0.Op != OpOffPtr { break } c := v_0_0_0.AuxInt v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpSP || mem != v_0_0.Args[1] { + if v_0_0_0_0.Op != OpSP { break } - x := v.Args[1] - if !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { + x := v_1 + if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } v.reset(OpCopy) @@ -21636,15 +20428,14 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) for { t1 := v.Aux - _ = v.Args[2] - op1 := v.Args[0] + op1 := v_0 if op1.Op != OpOffPtr { break } o1 := op1.AuxInt p1 := op1.Args[0] - d1 := v.Args[1] - m2 := v.Args[2] + d1 := v_1 + m2 := v_2 if m2.Op != OpStore { break } @@ -21681,21 +20472,23 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { return false } func rewriteValuegeneric_OpStore_20(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem)))) // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t3) + sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2) && clobber(m3) && clobber(m4) // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) for { t1 := v.Aux - _ = v.Args[2] - op1 := v.Args[0] + op1 := v_0 if op1.Op != OpOffPtr { break } o1 := op1.AuxInt p1 := op1.Args[0] - d1 := v.Args[1] - m2 := v.Args[2] + d1 := v_1 + m2 := v_2 if m2.Op != OpStore { break } @@ -21752,15 +20545,14 @@ func rewriteValuegeneric_OpStore_20(v *Value) bool { // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) for { t1 := v.Aux - _ = v.Args[2] - op1 := v.Args[0] + op1 := v_0 if op1.Op != OpOffPtr { break } o1 := op1.AuxInt p1 := op1.Args[0] - d1 := v.Args[1] - m2 := v.Args[2] + d1 := v_1 + m2 := v_2 if m2.Op != OpStore { break } @@ -21835,15 +20627,14 @@ func rewriteValuegeneric_OpStore_20(v *Value) bool { // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) for { t1 := v.Aux - _ = v.Args[2] - op1 := v.Args[0] + op1 := v_0 if op1.Op != OpOffPtr { break } o1 := op1.AuxInt p1 := op1.Args[0] - d1 := v.Args[1] - m2 := v.Args[2] + d1 := v_1 + m2 := v_2 if m2.Op != OpStore { break } @@ -21882,15 +20673,14 @@ func rewriteValuegeneric_OpStore_20(v *Value) bool { // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) for { t1 := v.Aux - _ = v.Args[2] - op1 := v.Args[0] + op1 := v_0 if op1.Op != OpOffPtr { break } o1 := op1.AuxInt p1 := op1.Args[0] - d1 := v.Args[1] - m2 := v.Args[2] + d1 := v_1 + m2 := v_2 if m2.Op != OpStore { break } @@ -21947,15 +20737,14 @@ func rewriteValuegeneric_OpStore_20(v *Value) bool { // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) for { t1 := v.Aux - _ = v.Args[2] - op1 := v.Args[0] + op1 := v_0 if op1.Op != OpOffPtr { break } o1 := op1.AuxInt p1 := op1.Args[0] - d1 := v.Args[1] - m2 := v.Args[2] + d1 := v_1 + m2 := v_2 
if m2.Op != OpStore { break } @@ -22028,10 +20817,10 @@ func rewriteValuegeneric_OpStore_20(v *Value) bool { return false } func rewriteValuegeneric_OpStringLen_0(v *Value) bool { + v_0 := v.Args[0] // match: (StringLen (StringMake _ (Const64 [c]))) // result: (Const64 [c]) for { - v_0 := v.Args[0] if v_0.Op != OpStringMake { break } @@ -22050,10 +20839,10 @@ func rewriteValuegeneric_OpStringLen_0(v *Value) bool { return false } func rewriteValuegeneric_OpStringPtr_0(v *Value) bool { + v_0 := v.Args[0] // match: (StringPtr (StringMake (Addr {s} base) _)) // result: (Addr {s} base) for { - v_0 := v.Args[0] if v_0.Op != OpStringMake { break } @@ -22074,10 +20863,10 @@ func rewriteValuegeneric_OpStringPtr_0(v *Value) bool { return false } func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { + v_0 := v.Args[0] // match: (StructSelect (StructMake1 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpStructMake1 { break } @@ -22090,11 +20879,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [0] (StructMake2 x _)) // result: x for { - if v.AuxInt != 0 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake2 { + if v.AuxInt != 0 || v_0.Op != OpStructMake2 { break } _ = v_0.Args[1] @@ -22107,11 +20892,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [1] (StructMake2 _ x)) // result: x for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake2 { + if v.AuxInt != 1 || v_0.Op != OpStructMake2 { break } x := v_0.Args[1] @@ -22123,11 +20904,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [0] (StructMake3 x _ _)) // result: x for { - if v.AuxInt != 0 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake3 { + if v.AuxInt != 0 || v_0.Op != OpStructMake3 { break } _ = v_0.Args[2] @@ -22140,11 +20917,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [1] (StructMake3 _ x _)) // result: x for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake3 { + if v.AuxInt != 1 || v_0.Op != OpStructMake3 { break } _ = v_0.Args[2] @@ -22157,11 +20930,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [2] (StructMake3 _ _ x)) // result: x for { - if v.AuxInt != 2 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake3 { + if v.AuxInt != 2 || v_0.Op != OpStructMake3 { break } x := v_0.Args[2] @@ -22173,11 +20942,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [0] (StructMake4 x _ _ _)) // result: x for { - if v.AuxInt != 0 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake4 { + if v.AuxInt != 0 || v_0.Op != OpStructMake4 { break } _ = v_0.Args[3] @@ -22190,11 +20955,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [1] (StructMake4 _ x _ _)) // result: x for { - if v.AuxInt != 1 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake4 { + if v.AuxInt != 1 || v_0.Op != OpStructMake4 { break } _ = v_0.Args[3] @@ -22207,11 +20968,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [2] (StructMake4 _ _ x _)) // result: x for { - if v.AuxInt != 2 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake4 { + if v.AuxInt != 2 || v_0.Op != OpStructMake4 { break } _ = v_0.Args[3] @@ -22224,11 +20981,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { // match: (StructSelect [3] (StructMake4 _ _ _ x)) // result: x for { 
- if v.AuxInt != 3 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpStructMake4 { + if v.AuxInt != 3 || v_0.Op != OpStructMake4 { break } x := v_0.Args[3] @@ -22240,6 +20993,7 @@ func rewriteValuegeneric_OpStructSelect_0(v *Value) bool { return false } func rewriteValuegeneric_OpStructSelect_10(v *Value) bool { + v_0 := v.Args[0] b := v.Block fe := b.Func.fe // match: (StructSelect [i] x:(Load ptr mem)) @@ -22247,7 +21001,7 @@ func rewriteValuegeneric_OpStructSelect_10(v *Value) bool { // result: @x.Block (Load (OffPtr [t.FieldOff(int(i))] ptr) mem) for { i := v.AuxInt - x := v.Args[0] + x := v_0 if x.Op != OpLoad { break } @@ -22271,11 +21025,7 @@ func rewriteValuegeneric_OpStructSelect_10(v *Value) bool { // match: (StructSelect [0] (IData x)) // result: (IData x) for { - if v.AuxInt != 0 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpIData { + if v.AuxInt != 0 || v_0.Op != OpIData { break } x := v_0.Args[0] @@ -22286,17 +21036,16 @@ func rewriteValuegeneric_OpStructSelect_10(v *Value) bool { return false } func rewriteValuegeneric_OpSub16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Sub16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c-d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst16 { break } @@ -22309,9 +21058,7 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // cond: x.Op != OpConst16 // result: (Add16 (Const16 [int64(int16(-c))]) x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst16 { break } @@ -22331,25 +21078,26 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // result: (Mul16 x (Sub16 y z)) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMul16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - y := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i1] + z := v_1_1 v.reset(OpMul16) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub16, t) @@ -22364,8 +21112,8 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // match: (Sub16 x x) // result: (Const16 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst16) @@ -22375,17 +21123,18 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // match: (Sub16 (Add16 x y) x) // result: y for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - if x != v_0.Args[_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { continue } - y := v_0.Args[1^_i0] v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -22396,15 +21145,16 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // match: (Sub16 (Add16 x y) y) // result: x for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - if y != v_0.Args[1^_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 
{ + x := v_0_0 + y := v_0_1 + if y != v_1 { continue } v.reset(OpCopy) @@ -22418,9 +21168,7 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Sub16 (Add16 x z) i) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub16 { break } @@ -22445,9 +21193,7 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Sub16 x z)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub16 { break } @@ -22472,14 +21218,11 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // match: (Sub16 (Const16 [c]) (Sub16 x (Const16 [d]))) // result: (Sub16 (Const16 [int64(int16(c+d))]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub16 { break } @@ -22500,14 +21243,11 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { // match: (Sub16 (Const16 [c]) (Sub16 (Const16 [d]) x)) // result: (Add16 (Const16 [int64(int16(c-d))]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub16 { break } @@ -22527,17 +21267,16 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { return false } func rewriteValuegeneric_OpSub32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Sub32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c-d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32 { break } @@ -22550,9 +21289,7 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // cond: x.Op != OpConst32 // result: (Add32 (Const32 [int64(int32(-c))]) x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst32 { break } @@ -22572,25 +21309,26 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // result: (Mul32 x (Sub32 y z)) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMul32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - y := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i1] + z := v_1_1 v.reset(OpMul32) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub32, t) @@ -22605,8 +21343,8 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // match: (Sub32 x x) // result: (Const32 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst32) @@ -22616,17 +21354,18 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // match: (Sub32 (Add32 x y) x) // result: y for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - if x != v_0.Args[_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { continue } - y := v_0.Args[1^_i0] v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -22637,15 +21376,16 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool 
{ // match: (Sub32 (Add32 x y) y) // result: x for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - if y != v_0.Args[1^_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if y != v_1 { continue } v.reset(OpCopy) @@ -22659,9 +21399,7 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Sub32 (Add32 x z) i) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub32 { break } @@ -22686,9 +21424,7 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Add32 i (Sub32 x z)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub32 { break } @@ -22713,14 +21449,11 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // match: (Sub32 (Const32 [c]) (Sub32 x (Const32 [d]))) // result: (Sub32 (Const32 [int64(int32(c+d))]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub32 { break } @@ -22741,14 +21474,11 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { // match: (Sub32 (Const32 [c]) (Sub32 (Const32 [d]) x)) // result: (Add32 (Const32 [int64(int32(c-d))]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub32 { break } @@ -22768,16 +21498,15 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { return false } func rewriteValuegeneric_OpSub32F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub32F (Const32F [c]) (Const32F [d])) // result: (Const32F [auxFrom32F(auxTo32F(c) - auxTo32F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst32F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst32F { break } @@ -22789,17 +21518,16 @@ func rewriteValuegeneric_OpSub32F_0(v *Value) bool { return false } func rewriteValuegeneric_OpSub64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Sub64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c-d]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64 { break } @@ -22812,9 +21540,7 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // cond: x.Op != OpConst64 // result: (Add64 (Const64 [-c]) x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst64 { break } @@ -22834,25 +21560,26 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // result: (Mul64 x (Sub64 y z)) for { t := v.Type - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMul64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - y := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i1] + z := v_1_1 v.reset(OpMul64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub64, t) @@ -22867,8 +21594,8 @@ func rewriteValuegeneric_OpSub64_0(v 
*Value) bool { // match: (Sub64 x x) // result: (Const64 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst64) @@ -22878,17 +21605,18 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // match: (Sub64 (Add64 x y) x) // result: y for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - if x != v_0.Args[_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { continue } - y := v_0.Args[1^_i0] v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -22899,15 +21627,16 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // match: (Sub64 (Add64 x y) y) // result: x for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - if y != v_0.Args[1^_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if y != v_1 { continue } v.reset(OpCopy) @@ -22921,9 +21650,7 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Sub64 (Add64 x z) i) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub64 { break } @@ -22948,9 +21675,7 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Sub64 x z)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub64 { break } @@ -22975,14 +21700,11 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // match: (Sub64 (Const64 [c]) (Sub64 x (Const64 [d]))) // result: (Sub64 (Const64 [c+d]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub64 { break } @@ -23003,14 +21725,11 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { // match: (Sub64 (Const64 [c]) (Sub64 (Const64 [d]) x)) // result: (Add64 (Const64 [c-d]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub64 { break } @@ -23030,16 +21749,15 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { return false } func rewriteValuegeneric_OpSub64F_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] // match: (Sub64F (Const64F [c]) (Const64F [d])) // result: (Const64F [auxFrom64F(auxTo64F(c) - auxTo64F(d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst64F { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst64F { break } @@ -23051,17 +21769,16 @@ func rewriteValuegeneric_OpSub64F_0(v *Value) bool { return false } func rewriteValuegeneric_OpSub8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Sub8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c-d))]) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpConst8 { break } @@ -23074,9 +21791,7 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // cond: x.Op != OpConst8 // result: (Add8 (Const8 [int64(int8(-c))]) x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpConst8 { break } @@ -23096,25 +21811,26 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // result: (Mul8 x (Sub8 y z)) for { t := v.Type - _ = 
v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpMul8 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - y := v_0.Args[1^_i0] - v_1 := v.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 if v_1.Op != OpMul8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - z := v_1.Args[1^_i1] + z := v_1_1 v.reset(OpMul8) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpSub8, t) @@ -23129,8 +21845,8 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // match: (Sub8 x x) // result: (Const8 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst8) @@ -23140,17 +21856,18 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // match: (Sub8 (Add8 x y) x) // result: y for { - x := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd8 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - if x != v_0.Args[_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { continue } - y := v_0.Args[1^_i0] v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -23161,15 +21878,16 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // match: (Sub8 (Add8 x y) y) // result: x for { - y := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpAdd8 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v_0.Args[_i0] - if y != v_0.Args[1^_i0] { + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if y != v_1 { continue } v.reset(OpCopy) @@ -23183,9 +21901,7 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Sub8 (Add8 x z) i) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub8 { break } @@ -23210,9 +21926,7 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Sub8 x z)) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] + x := v_0 if v_1.Op != OpSub8 { break } @@ -23237,14 +21951,11 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // match: (Sub8 (Const8 [c]) (Sub8 x (Const8 [d]))) // result: (Sub8 (Const8 [int64(int8(c+d))]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub8 { break } @@ -23265,14 +21976,11 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // result: (Add8 (Const8 [int64(int8(c-d))]) x) for { - _ = v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] if v_1.Op != OpSub8 { break } @@ -23292,10 +22000,10 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc16to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc16to8 (Const16 [c])) // result: (Const8 [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -23307,7 +22015,6 @@ func rewriteValuegeneric_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 (ZeroExt8to16 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to16 { break } @@ -23320,7 +22027,6 @@ func 
rewriteValuegeneric_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 (SignExt8to16 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpSignExt8to16 { break } @@ -23334,18 +22040,18 @@ func rewriteValuegeneric_OpTrunc16to8_0(v *Value) bool { // cond: y&0xFF == 0xFF // result: (Trunc16to8 x) for { - v_0 := v.Args[0] if v_0.Op != OpAnd16 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst16 { continue } y := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(y&0xFF == 0xFF) { continue } @@ -23358,10 +22064,10 @@ func rewriteValuegeneric_OpTrunc16to8_0(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to16 (Const32 [c])) // result: (Const16 [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -23373,7 +22079,6 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { // match: (Trunc32to16 (ZeroExt8to32 x)) // result: (ZeroExt8to16 x) for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to32 { break } @@ -23385,7 +22090,6 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { // match: (Trunc32to16 (ZeroExt16to32 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt16to32 { break } @@ -23398,7 +22102,6 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { // match: (Trunc32to16 (SignExt8to32 x)) // result: (SignExt8to16 x) for { - v_0 := v.Args[0] if v_0.Op != OpSignExt8to32 { break } @@ -23410,7 +22113,6 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { // match: (Trunc32to16 (SignExt16to32 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpSignExt16to32 { break } @@ -23424,18 +22126,18 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { // cond: y&0xFFFF == 0xFFFF // result: (Trunc32to16 x) for { - v_0 := v.Args[0] if v_0.Op != OpAnd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst32 { continue } y := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(y&0xFFFF == 0xFFFF) { continue } @@ -23448,10 +22150,10 @@ func rewriteValuegeneric_OpTrunc32to16_0(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc32to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc32to8 (Const32 [c])) // result: (Const8 [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -23463,7 +22165,6 @@ func rewriteValuegeneric_OpTrunc32to8_0(v *Value) bool { // match: (Trunc32to8 (ZeroExt8to32 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to32 { break } @@ -23476,7 +22177,6 @@ func rewriteValuegeneric_OpTrunc32to8_0(v *Value) bool { // match: (Trunc32to8 (SignExt8to32 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpSignExt8to32 { break } @@ -23490,18 +22190,18 @@ func rewriteValuegeneric_OpTrunc32to8_0(v *Value) bool { // cond: y&0xFF == 0xFF // result: (Trunc32to8 x) for { - v_0 := v.Args[0] if v_0.Op != OpAnd32 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst32 { continue } y := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(y&0xFF == 0xFF) { continue } @@ -23514,10 +22214,10 
@@ func rewriteValuegeneric_OpTrunc32to8_0(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to16 (Const64 [c])) // result: (Const16 [int64(int16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } @@ -23529,7 +22229,6 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { // match: (Trunc64to16 (ZeroExt8to64 x)) // result: (ZeroExt8to16 x) for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to64 { break } @@ -23541,7 +22240,6 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { // match: (Trunc64to16 (ZeroExt16to64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt16to64 { break } @@ -23554,7 +22252,6 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { // match: (Trunc64to16 (SignExt8to64 x)) // result: (SignExt8to16 x) for { - v_0 := v.Args[0] if v_0.Op != OpSignExt8to64 { break } @@ -23566,7 +22263,6 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { // match: (Trunc64to16 (SignExt16to64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpSignExt16to64 { break } @@ -23580,18 +22276,18 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { // cond: y&0xFFFF == 0xFFFF // result: (Trunc64to16 x) for { - v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst64 { continue } y := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(y&0xFFFF == 0xFFFF) { continue } @@ -23604,10 +22300,10 @@ func rewriteValuegeneric_OpTrunc64to16_0(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to32 (Const64 [c])) // result: (Const32 [int64(int32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } @@ -23619,7 +22315,6 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (ZeroExt8to64 x)) // result: (ZeroExt8to32 x) for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to64 { break } @@ -23631,7 +22326,6 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (ZeroExt16to64 x)) // result: (ZeroExt16to32 x) for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt16to64 { break } @@ -23643,7 +22337,6 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (ZeroExt32to64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt32to64 { break } @@ -23656,7 +22349,6 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (SignExt8to64 x)) // result: (SignExt8to32 x) for { - v_0 := v.Args[0] if v_0.Op != OpSignExt8to64 { break } @@ -23668,7 +22360,6 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (SignExt16to64 x)) // result: (SignExt16to32 x) for { - v_0 := v.Args[0] if v_0.Op != OpSignExt16to64 { break } @@ -23680,7 +22371,6 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // match: (Trunc64to32 (SignExt32to64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpSignExt32to64 { break } @@ -23694,18 +22384,18 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { // cond: y&0xFFFFFFFF == 0xFFFFFFFF // result: (Trunc64to32 x) for { - v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 
1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst64 { continue } y := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(y&0xFFFFFFFF == 0xFFFFFFFF) { continue } @@ -23718,10 +22408,10 @@ func rewriteValuegeneric_OpTrunc64to32_0(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc64to8_0(v *Value) bool { + v_0 := v.Args[0] // match: (Trunc64to8 (Const64 [c])) // result: (Const8 [int64(int8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst64 { break } @@ -23733,7 +22423,6 @@ func rewriteValuegeneric_OpTrunc64to8_0(v *Value) bool { // match: (Trunc64to8 (ZeroExt8to64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpZeroExt8to64 { break } @@ -23746,7 +22435,6 @@ func rewriteValuegeneric_OpTrunc64to8_0(v *Value) bool { // match: (Trunc64to8 (SignExt8to64 x)) // result: x for { - v_0 := v.Args[0] if v_0.Op != OpSignExt8to64 { break } @@ -23760,18 +22448,18 @@ func rewriteValuegeneric_OpTrunc64to8_0(v *Value) bool { // cond: y&0xFF == 0xFF // result: (Trunc64to8 x) for { - v_0 := v.Args[0] if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0_0 := v_0.Args[_i0] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { if v_0_0.Op != OpConst64 { continue } y := v_0_0.AuxInt - x := v_0.Args[1^_i0] + x := v_0_1 if !(y&0xFF == 0xFF) { continue } @@ -23784,18 +22472,17 @@ func rewriteValuegeneric_OpTrunc64to8_0(v *Value) bool { return false } func rewriteValuegeneric_OpXor16_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Xor16 (Const16 [c]) (Const16 [d])) // result: (Const16 [int64(int16(c^d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst16 { continue } @@ -23809,8 +22496,8 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { // match: (Xor16 x x) // result: (Const16 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst16) @@ -23820,13 +22507,11 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { // match: (Xor16 (Const16 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -23837,19 +22522,19 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { // match: (Xor16 x (Xor16 x y)) // result: y for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpXor16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -23862,21 +22547,21 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Xor16 i (Xor16 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpXor16 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := 
v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst16 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst16 && x.Op != OpConst16) { continue } @@ -23894,26 +22579,24 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) // result: (Xor16 (Const16 [int64(int16(c^d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst16 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpXor16 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst16 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpXor16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c ^ d)) @@ -23927,18 +22610,17 @@ func rewriteValuegeneric_OpXor16_0(v *Value) bool { return false } func rewriteValuegeneric_OpXor32_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Xor32 (Const32 [c]) (Const32 [d])) // result: (Const32 [int64(int32(c^d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst32 { continue } @@ -23952,8 +22634,8 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { // match: (Xor32 x x) // result: (Const32 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst32) @@ -23963,13 +22645,11 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { // match: (Xor32 (Const32 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -23980,19 +22660,19 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { // match: (Xor32 x (Xor32 x y)) // result: y for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpXor32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -24005,21 +22685,21 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Xor32 i (Xor32 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpXor32 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst32 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x 
:= v_1 if !(z.Op != OpConst32 && x.Op != OpConst32) { continue } @@ -24037,26 +22717,24 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) // result: (Xor32 (Const32 [int64(int32(c^d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst32 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpXor32 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst32 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpXor32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c ^ d)) @@ -24070,18 +22748,17 @@ func rewriteValuegeneric_OpXor32_0(v *Value) bool { return false } func rewriteValuegeneric_OpXor64_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Xor64 (Const64 [c]) (Const64 [d])) // result: (Const64 [c^d]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst64 { continue } @@ -24095,8 +22772,8 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { // match: (Xor64 x x) // result: (Const64 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst64) @@ -24106,13 +22783,11 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { // match: (Xor64 (Const64 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -24123,19 +22798,19 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { // match: (Xor64 x (Xor64 x y)) // result: y for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpXor64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -24148,21 +22823,21 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Xor64 i (Xor64 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpXor64 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst64 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } @@ -24180,26 +22855,24 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) // result: (Xor64 (Const64 [c^d]) x) for { - _ = v.Args[1] - 
for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst64 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpXor64 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpXor64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c ^ d @@ -24213,18 +22886,17 @@ func rewriteValuegeneric_OpXor64_0(v *Value) bool { return false } func rewriteValuegeneric_OpXor8_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block // match: (Xor8 (Const8 [c]) (Const8 [d])) // result: (Const8 [int64(int8(c^d))]) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpConst8 { continue } @@ -24238,8 +22910,8 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { // match: (Xor8 x x) // result: (Const8 [0]) for { - x := v.Args[1] - if x != v.Args[0] { + x := v_0 + if x != v_1 { break } v.reset(OpConst8) @@ -24249,13 +22921,11 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { // match: (Xor8 (Const8 [0]) x) // result: x for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 || v_0.AuxInt != 0 { continue } - x := v.Args[1^_i0] + x := v_1 v.reset(OpCopy) v.Type = x.Type v.AddArg(x) @@ -24266,19 +22936,19 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { // match: (Xor8 x (Xor8 x y)) // result: y for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - x := v.Args[_i0] - v_1 := v.Args[1^_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 if v_1.Op != OpXor8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - if x != v_1.Args[_i1] { + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { continue } - y := v_1.Args[1^_i1] + y := v_1_1 v.reset(OpCopy) v.Type = y.Type v.AddArg(y) @@ -24291,21 +22961,21 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Xor8 i (Xor8 z x)) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpXor8 { continue } _ = v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - i := v_0.Args[_i1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 if i.Op != OpConst8 { continue } t := i.Type - z := v_0.Args[1^_i1] - x := v.Args[1^_i0] + z := v_0_1 + x := v_1 if !(z.Op != OpConst8 && x.Op != OpConst8) { continue } @@ -24323,26 +22993,24 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { // match: (Xor8 (Const8 [c]) (Xor8 (Const8 [d]) x)) // result: (Xor8 (Const8 [int64(int8(c^d))]) x) for { - _ = v.Args[1] - for _i0 := 0; _i0 <= 1; _i0++ { - v_0 := v.Args[_i0] + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpConst8 { continue } t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1^_i0] if v_1.Op != OpXor8 { continue } _ = v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1++ { - v_1_0 := 
v_1.Args[_i1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { if v_1_0.Op != OpConst8 || v_1_0.Type != t { continue } d := v_1_0.AuxInt - x := v_1.Args[1^_i1] + x := v_1_1 v.reset(OpXor8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c ^ d)) @@ -24356,25 +23024,25 @@ func rewriteValuegeneric_OpXor8_0(v *Value) bool { return false } func rewriteValuegeneric_OpZero_0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (Zero (Load (OffPtr [c] (SP)) mem) mem) // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // result: mem for { - mem := v.Args[1] - v_0 := v.Args[0] if v_0.Op != OpLoad { break } - _ = v_0.Args[1] + mem := v_0.Args[1] v_0_0 := v_0.Args[0] if v_0_0.Op != OpOffPtr { break } c := v_0_0.AuxInt v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSP || mem != v_0.Args[1] || !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { + if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } v.reset(OpCopy) @@ -24388,9 +23056,8 @@ func rewriteValuegeneric_OpZero_0(v *Value) bool { for { n := v.AuxInt t1 := v.Aux - _ = v.Args[1] - p1 := v.Args[0] - store := v.Args[1] + p1 := v_0 + store := v_1 if store.Op != OpStore { break } @@ -24418,9 +23085,8 @@ func rewriteValuegeneric_OpZero_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[1] - dst1 := v.Args[0] - move := v.Args[1] + dst1 := v_0 + move := v_1 if move.Op != OpMove || move.AuxInt != n || move.Aux != t { break } @@ -24442,9 +23108,8 @@ func rewriteValuegeneric_OpZero_0(v *Value) bool { for { n := v.AuxInt t := v.Aux - _ = v.Args[1] - dst1 := v.Args[0] - vardef := v.Args[1] + dst1 := v_0 + vardef := v_1 if vardef.Op != OpVarDef { break } @@ -24471,10 +23136,10 @@ func rewriteValuegeneric_OpZero_0(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt16to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to32 (Const16 [c])) // result: (Const32 [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -24487,7 +23152,6 @@ func rewriteValuegeneric_OpZeroExt16to32_0(v *Value) bool { // cond: s >= 16 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc32to16 { break } @@ -24512,10 +23176,10 @@ func rewriteValuegeneric_OpZeroExt16to32_0(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt16to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt16to64 (Const16 [c])) // result: (Const64 [int64(uint16(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst16 { break } @@ -24528,7 +23192,6 @@ func rewriteValuegeneric_OpZeroExt16to64_0(v *Value) bool { // cond: s >= 48 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc64to16 { break } @@ -24553,10 +23216,10 @@ func rewriteValuegeneric_OpZeroExt16to64_0(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt32to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt32to64 (Const32 [c])) // result: (Const64 [int64(uint32(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst32 { break } @@ -24569,7 +23232,6 @@ func rewriteValuegeneric_OpZeroExt32to64_0(v *Value) bool { // cond: s >= 32 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc64to32 { break } @@ -24594,10 +23256,10 @@ func 
rewriteValuegeneric_OpZeroExt32to64_0(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt8to16_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to16 (Const8 [c])) // result: (Const16 [int64( uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -24610,7 +23272,6 @@ func rewriteValuegeneric_OpZeroExt8to16_0(v *Value) bool { // cond: s >= 8 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc16to8 { break } @@ -24635,10 +23296,10 @@ func rewriteValuegeneric_OpZeroExt8to16_0(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt8to32_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to32 (Const8 [c])) // result: (Const32 [int64( uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -24651,7 +23312,6 @@ func rewriteValuegeneric_OpZeroExt8to32_0(v *Value) bool { // cond: s >= 24 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc32to8 { break } @@ -24676,10 +23336,10 @@ func rewriteValuegeneric_OpZeroExt8to32_0(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt8to64_0(v *Value) bool { + v_0 := v.Args[0] // match: (ZeroExt8to64 (Const8 [c])) // result: (Const64 [int64( uint8(c))]) for { - v_0 := v.Args[0] if v_0.Op != OpConst8 { break } @@ -24692,7 +23352,6 @@ func rewriteValuegeneric_OpZeroExt8to64_0(v *Value) bool { // cond: s >= 56 // result: x for { - v_0 := v.Args[0] if v_0.Op != OpTrunc64to8 { break }
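
Not part of the patch: a minimal, self-contained Go sketch of the two idioms visible throughout the regenerated matchers above, namely hoisting the v.Args[i] loads into v_0/v_1 variables up front and enumerating both orders of a commutative operation by swapping those variables in the loop's post statement (_i0, v_0, v_1 = _i0+1, v_1, v_0) instead of re-indexing v.Args on each pass. The Val type and the matchAddConst helper below are hypothetical stand-ins for illustration only, not code from the compiler.

package main

import "fmt"

// Val is a hypothetical stand-in for an SSA value: an opcode plus its arguments.
type Val struct {
	Op   string
	Args []*Val
}

// matchAddConst reports whether v matches (Add (Const) x) in either argument
// order. It mirrors the shape of the generated commute loop: the argument
// variables are loaded once up front and swapped in the loop's post statement.
func matchAddConst(v *Val) (x *Val, ok bool) {
	if v.Op != "Add" {
		return nil, false
	}
	v_0 := v.Args[0]
	v_1 := v.Args[1]
	for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
		if v_0.Op != "Const" {
			continue
		}
		// v_0 is the constant on this iteration; v_1 is the other operand.
		return v_1, true
	}
	return nil, false
}

func main() {
	c := &Val{Op: "Const"}
	x := &Val{Op: "Var"}
	// The constant matches on either side of the Add.
	if got, ok := matchAddConst(&Val{Op: "Add", Args: []*Val{x, c}}); ok {
		fmt.Println(got.Op) // Var
	}
	if got, ok := matchAddConst(&Val{Op: "Add", Args: []*Val{c, x}}); ok {
		fmt.Println(got.Op) // Var
	}
}

Because only the local variables are swapped, the match body can refer to v_0 and v_1 unconditionally on every iteration, which is what lets the argument loads sit once at the top of each generated rewrite function rather than being repeated inside every rule.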