cmd/compile: use loops to handle commutative ops in rules

Prior to this change, we generated additional rules at rulegen time
for all possible combinations of args to commutative ops.
This is simple and works well, but leads to lots of generated rules.
This in turn has increased the size of the compiler,
made it hard to compile package ssa on small machines,
and provided a disincentive to mark some ops as commutative.

This change reworks how we handle commutative ops.
Instead of generating a rule per argument permutation,
we generate a series of nested loops, one for each commutative op.
Each loop tries both possible argument orderings.

I also considered attempting to canonicalize the inputs to the
rewrite rules. However, because either or both arguments might be
nothing more than an identifier, and because there can be arbitrary
conditions to evaluate during matching, I did not see how to proceed.

The duplicate rule detection now sorts arguments to commutative ops,
so that it can detect commutative-only duplicates.

There may be further optimizations to the new generated code.
In particular, we may not be removing as many bounds checks as before;
I have not investigated deeply. If more work here is needed,
we could do it with more hints or with improvements to the prove pass.

This change has almost no impact on the generated code.
It does not pass toolstash-check, however. In a handful of functions,
for reasons I do not understand, there are minor position changes.

For the entire series ending at this change,
there is negligible compiler performance impact.

The compiler binary shrinks by about 15%,
and package ssa shrinks by about 25%.
Package ssa also compiles ~25% faster with ~25% less memory.

Change-Id: Ia2ee9ceae7be08a17342319d4e31b0bb238a2ee4
Reviewed-on: https://go-review.googlesource.com/c/go/+/213703
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
This commit is contained in:
Josh Bleecher Snyder 2020-01-06 22:24:02 -08:00
parent 631b49886c
commit bd6d78ef37
13 changed files with 28316 additions and 102563 deletions

View File

@ -140,18 +140,16 @@ func genRulesSuffix(arch arch, suff string) {
loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno)
for _, rule2 := range expandOr(rule) {
for _, rule3 := range commute(rule2, arch) {
r := Rule{rule: rule3, loc: loc}
if rawop := strings.Split(rule3, " ")[0][1:]; isBlock(rawop, arch) {
blockrules[rawop] = append(blockrules[rawop], r)
continue
}
// Do fancier value op matching.
match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
r := Rule{rule: rule2, loc: loc}
if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) {
blockrules[rawop] = append(blockrules[rawop], r)
continue
}
// Do fancier value op matching.
match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
}
rule = ""
ruleLineno = 0
@ -489,6 +487,8 @@ func (u *unusedInspector) node(node ast.Node) {
u.scope.objects[name.Name] = obj
case *ast.ReturnStmt:
u.exprs(node.Results)
case *ast.IncDecStmt:
u.node(node.X)
// expressions
@ -554,6 +554,7 @@ type object struct {
func fprint(w io.Writer, n Node) {
switch n := n.(type) {
case *File:
file := n
seenRewrite := make(map[[3]string]string)
fmt.Fprintf(w, "// Code generated from gen/%s%s.rules; DO NOT EDIT.\n", n.arch.name, n.suffix)
fmt.Fprintf(w, "// generated with: cd gen; go run *.go\n")
@ -575,7 +576,11 @@ func fprint(w io.Writer, n Node) {
fprint(w, n)
if rr, ok := n.(*RuleRewrite); ok {
k := [3]string{rr.match, rr.cond, rr.result}
k := [3]string{
normalizeMatch(rr.match, file.arch),
normalizeWhitespace(rr.cond),
normalizeWhitespace(rr.result),
}
if prev, ok := seenRewrite[k]; ok {
log.Fatalf("duplicate rule %s, previously seen at %s\n", rr.loc, prev)
} else {
@ -610,10 +615,27 @@ func fprint(w io.Writer, n Node) {
}
fmt.Fprintf(w, "// result: %s\n", n.result)
fmt.Fprintf(w, "for %s {\n", n.check)
nCommutative := 0
for _, n := range n.list {
if b, ok := n.(*CondBreak); ok {
b.insideCommuteLoop = nCommutative > 0
}
fprint(w, n)
if loop, ok := n.(StartCommuteLoop); ok {
if nCommutative != loop.depth {
panic("mismatch commute loop depth")
}
nCommutative++
}
}
fmt.Fprintf(w, "return true\n}\n")
fmt.Fprintf(w, "return true\n")
for i := 0; i < nCommutative; i++ {
fmt.Fprintln(w, "}")
}
if n.commuteDepth > 0 && n.canFail {
fmt.Fprint(w, "break\n")
}
fmt.Fprintf(w, "}\n")
case *Declare:
fmt.Fprintf(w, "%s := ", n.name)
fprint(w, n.value)
@ -621,12 +643,20 @@ func fprint(w io.Writer, n Node) {
case *CondBreak:
fmt.Fprintf(w, "if ")
fprint(w, n.expr)
fmt.Fprintf(w, " {\nbreak\n}\n")
fmt.Fprintf(w, " {\n")
if n.insideCommuteLoop {
fmt.Fprintf(w, "continue")
} else {
fmt.Fprintf(w, "break")
}
fmt.Fprintf(w, "\n}\n")
case ast.Node:
printConfig.Fprint(w, emptyFset, n)
if _, ok := n.(ast.Stmt); ok {
fmt.Fprintln(w)
}
case StartCommuteLoop:
fmt.Fprintf(w, "for _i%d := 0; _i%d <= 1; _i%d++ {\n", n.depth, n.depth, n.depth)
default:
log.Fatalf("cannot print %T", n)
}
@ -714,15 +744,20 @@ type (
match, cond, result string // top comments
check string // top-level boolean expression
alloc int // for unique var names
loc string // file name & line number of the original rule
alloc int // for unique var names
loc string // file name & line number of the original rule
commuteDepth int // used to track depth of commute loops
}
Declare struct {
name string
value ast.Expr
}
CondBreak struct {
expr ast.Expr
expr ast.Expr
insideCommuteLoop bool
}
StartCommuteLoop struct {
depth int
}
)
@ -759,7 +794,7 @@ func declf(name, format string, a ...interface{}) *Declare {
// breakf constructs a simple "if cond { break }" statement, using exprf for its
// condition.
func breakf(format string, a ...interface{}) *CondBreak {
return &CondBreak{exprf(format, a...)}
return &CondBreak{expr: exprf(format, a...)}
}
func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
@ -779,7 +814,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
cname := fmt.Sprintf("b.Controls[%v]", i)
vname := fmt.Sprintf("v_%v", i)
rr.add(declf(vname, cname))
p, op := genMatch0(rr, arch, arg, vname)
p, op := genMatch0(rr, arch, arg, vname, nil) // TODO: pass non-nil cnt?
if op != "" {
check := fmt.Sprintf("%s.Op == %s", cname, op)
if rr.check == "" {
@ -893,10 +928,11 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
// genMatch returns the variable whose source position should be used for the
// result (or "" if no opinion), and a boolean that reports whether the match can fail.
func genMatch(rr *RuleRewrite, arch arch, match string) (pos, checkOp string) {
return genMatch0(rr, arch, match, "v")
cnt := varCount(rr.match, rr.cond)
return genMatch0(rr, arch, match, "v", cnt)
}
func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string) {
func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int) (pos, checkOp string) {
if match[0] != '(' || match[len(match)-1] != ')' {
log.Fatalf("non-compound expr in genMatch0: %q", match)
}
@ -927,10 +963,20 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
}
}
commutative := op.commutative
if commutative {
if args[0] == args[1] {
commutative = false
}
if cnt[args[0]] == 1 && cnt[args[1]] == 1 {
commutative = false
}
}
// Access last argument first to minimize bounds checks.
if n := len(args); n > 1 {
a := args[n-1]
if a != "_" && !rr.declared(a) && token.IsIdentifier(a) {
if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
rr.add(declf(a, "%s.Args[%d]", v, n-1))
// delete the last argument so it is not reprocessed
@ -939,7 +985,22 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
rr.add(stmtf("_ = %s.Args[%d]", v, n-1))
}
}
var commuteDepth int
if commutative {
commuteDepth = rr.commuteDepth
rr.add(StartCommuteLoop{commuteDepth})
rr.commuteDepth++
}
for i, arg := range args {
argidx := strconv.Itoa(i)
if commutative {
switch i {
case 0:
argidx = fmt.Sprintf("_i%d", commuteDepth)
case 1:
argidx = fmt.Sprintf("1^_i%d", commuteDepth)
}
}
if arg == "_" {
continue
}
@ -950,9 +1011,9 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
// the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering).
rr.add(breakf("%s != %s.Args[%d]", arg, v, i))
rr.add(breakf("%s != %s.Args[%s]", arg, v, argidx))
} else {
rr.add(declf(arg, "%s.Args[%d]", v, i))
rr.add(declf(arg, "%s.Args[%s]", v, argidx))
}
continue
}
@ -969,10 +1030,10 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string) (pos, checkOp string
log.Fatalf("don't name args 'b', it is ambiguous with blocks")
}
rr.add(declf(argname, "%s.Args[%d]", v, i))
rr.add(declf(argname, "%s.Args[%s]", v, argidx))
bexpr := exprf("%s.Op != addLater", argname)
rr.add(&CondBreak{expr: bexpr})
argPos, argCheckOp := genMatch0(rr, arch, arg, argname)
argPos, argCheckOp := genMatch0(rr, arch, arg, argname, cnt)
bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp
if argPos != "" {
@ -1334,99 +1395,6 @@ func expandOr(r string) []string {
return res
}
// commute returns all equivalent rules to r after applying all possible
// argument swaps to the commutable ops in r.
// Potentially exponential, be careful.
func commute(r string, arch arch) []string {
match, cond, result := Rule{rule: r}.parse()
a := commute1(match, varCount(match, cond), arch)
for i, m := range a {
if cond != "" {
m += " && " + cond
}
m += " -> " + result
a[i] = m
}
if len(a) == 1 && normalizeWhitespace(r) != normalizeWhitespace(a[0]) {
fmt.Println(normalizeWhitespace(r))
fmt.Println(normalizeWhitespace(a[0]))
log.Fatalf("commute() is not the identity for noncommuting rule")
}
if false && len(a) > 1 {
fmt.Println(r)
for _, x := range a {
fmt.Println(" " + x)
}
}
return a
}
func commute1(m string, cnt map[string]int, arch arch) []string {
if m[0] == '<' || m[0] == '[' || m[0] == '{' || token.IsIdentifier(m) {
return []string{m}
}
// Split up input.
var prefix string
if i := strings.Index(m, ":"); i >= 0 && token.IsIdentifier(m[:i]) {
prefix = m[:i+1]
m = m[i+1:]
}
if m[0] != '(' || m[len(m)-1] != ')' {
log.Fatalf("non-compound expr in commute1: %q", m)
}
s := split(m[1 : len(m)-1])
op := s[0]
commutative := opIsCommutative(op, arch)
var idx0, idx1 int
if commutative {
// Find indexes of two args we can swap.
for i, arg := range s {
if i == 0 || arg[0] == '<' || arg[0] == '[' || arg[0] == '{' {
continue
}
if idx0 == 0 {
idx0 = i
continue
}
if idx1 == 0 {
idx1 = i
break
}
}
if idx1 == 0 {
log.Fatalf("couldn't find first two args of commutative op %q", s[0])
}
if cnt[s[idx0]] == 1 && cnt[s[idx1]] == 1 || s[idx0] == s[idx1] {
// When we have (Add x y) with no other uses of x and y in the matching rule,
// then we can skip the commutative match (Add y x).
// Same for (Add x x), for any x.
commutative = false
}
}
// Recursively commute arguments.
a := make([][]string, len(s))
for i, arg := range s {
a[i] = commute1(arg, cnt, arch)
}
// Choose all possibilities from all args.
r := crossProduct(a)
// If commutative, do that again with its two args reversed.
if commutative {
a[idx0], a[idx1] = a[idx1], a[idx0]
r = append(r, crossProduct(a)...)
}
// Construct result.
for i, x := range r {
r[i] = prefix + "(" + x + ")"
}
return r
}
// varCount returns a map which counts the number of occurrences of
// Value variables in the s-expression "match" and the Go expression "cond".
func varCount(match, cond string) map[string]int {
@ -1469,22 +1437,6 @@ func varCount1(m string, cnt map[string]int) {
}
}
// crossProduct returns all possible values
// x[0][i] + " " + x[1][j] + " " + ... + " " + x[len(x)-1][k]
// for all valid values of i, j, ..., k.
func crossProduct(x [][]string) []string {
if len(x) == 1 {
return x[0]
}
var r []string
for _, tail := range crossProduct(x[1:]) {
for _, first := range x[0] {
r = append(r, first+" "+tail)
}
}
return r
}
// normalizeWhitespace replaces 2+ whitespace sequences with a single space.
func normalizeWhitespace(x string) string {
x = strings.Join(strings.Fields(x), " ")
@ -1516,3 +1468,26 @@ func opIsCommutative(op string, arch arch) bool {
}
return false
}
func normalizeMatch(m string, arch arch) string {
if token.IsIdentifier(m) {
return m
}
op, typ, auxint, aux, args := extract(m)
if opIsCommutative(op, arch) {
if args[1] < args[0] {
args[0], args[1] = args[1], args[0]
}
}
s := new(strings.Builder)
fmt.Fprintf(s, "%s <%s> [%s] {%s}", op, typ, auxint, aux)
for _, arg := range args {
var prefix string
if i := strings.Index(arg, ":"); i >= 0 && token.IsIdentifier(arg[:i]) {
prefix = arg[:i+1]
arg = arg[i+1:]
}
fmt.Fprint(s, " ", prefix, normalizeMatch(arg, arch))
}
return s.String()
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -546,7 +546,7 @@ func rewriteValueMIPS64(v *Value) bool {
case OpSelect0:
return rewriteValueMIPS64_OpSelect0_0(v)
case OpSelect1:
return rewriteValueMIPS64_OpSelect1_0(v) || rewriteValueMIPS64_OpSelect1_10(v)
return rewriteValueMIPS64_OpSelect1_0(v)
case OpSignExt16to32:
return rewriteValueMIPS64_OpSignExt16to32_0(v)
case OpSignExt16to64:
@ -3093,66 +3093,40 @@ func rewriteValueMIPS64_OpMIPS64ADDV_0(v *Value) bool {
// result: (ADDVconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v.Args[_i0]
v_1 := v.Args[1^_i0]
if v_1.Op != OpMIPS64MOVVconst {
continue
}
c := v_1.AuxInt
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64ADDVconst)
v.AuxInt = c
v.AddArg(x)
return true
}
c := v_1.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64ADDVconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (ADDV (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (ADDVconst [c] x)
for {
x := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MOVVconst {
break
}
c := v_0.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64ADDVconst)
v.AuxInt = c
v.AddArg(x)
return true
break
}
// match: (ADDV x (NEGV y))
// result: (SUBV x y)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64NEGV {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v.Args[_i0]
v_1 := v.Args[1^_i0]
if v_1.Op != OpMIPS64NEGV {
continue
}
y := v_1.Args[0]
v.reset(OpMIPS64SUBV)
v.AddArg(x)
v.AddArg(y)
return true
}
y := v_1.Args[0]
v.reset(OpMIPS64SUBV)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (ADDV (NEGV y) x)
// result: (SUBV x y)
for {
x := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpMIPS64NEGV {
break
}
y := v_0.Args[0]
v.reset(OpMIPS64SUBV)
v.AddArg(x)
v.AddArg(y)
return true
break
}
return false
}
@ -3245,37 +3219,22 @@ func rewriteValueMIPS64_OpMIPS64AND_0(v *Value) bool {
// result: (ANDconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v.Args[_i0]
v_1 := v.Args[1^_i0]
if v_1.Op != OpMIPS64MOVVconst {
continue
}
c := v_1.AuxInt
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64ANDconst)
v.AuxInt = c
v.AddArg(x)
return true
}
c := v_1.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64ANDconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (AND (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (ANDconst [c] x)
for {
x := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MOVVconst {
break
}
c := v_0.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64ANDconst)
v.AuxInt = c
v.AddArg(x)
return true
break
}
// match: (AND x x)
// result: x
@ -5182,37 +5141,22 @@ func rewriteValueMIPS64_OpMIPS64NOR_0(v *Value) bool {
// result: (NORconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v.Args[_i0]
v_1 := v.Args[1^_i0]
if v_1.Op != OpMIPS64MOVVconst {
continue
}
c := v_1.AuxInt
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64NORconst)
v.AuxInt = c
v.AddArg(x)
return true
}
c := v_1.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64NORconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (NOR (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (NORconst [c] x)
for {
x := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MOVVconst {
break
}
c := v_0.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64NORconst)
v.AuxInt = c
v.AddArg(x)
return true
break
}
return false
}
@ -5238,37 +5182,22 @@ func rewriteValueMIPS64_OpMIPS64OR_0(v *Value) bool {
// result: (ORconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v.Args[_i0]
v_1 := v.Args[1^_i0]
if v_1.Op != OpMIPS64MOVVconst {
continue
}
c := v_1.AuxInt
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64ORconst)
v.AuxInt = c
v.AddArg(x)
return true
}
c := v_1.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64ORconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (OR (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (ORconst [c] x)
for {
x := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MOVVconst {
break
}
c := v_0.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64ORconst)
v.AuxInt = c
v.AddArg(x)
return true
break
}
// match: (OR x x)
// result: x
@ -5943,37 +5872,22 @@ func rewriteValueMIPS64_OpMIPS64XOR_0(v *Value) bool {
// result: (XORconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v.Args[_i0]
v_1 := v.Args[1^_i0]
if v_1.Op != OpMIPS64MOVVconst {
continue
}
c := v_1.AuxInt
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64XORconst)
v.AuxInt = c
v.AddArg(x)
return true
}
c := v_1.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64XORconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (XOR (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (XORconst [c] x)
for {
x := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MOVVconst {
break
}
c := v_0.AuxInt
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64XORconst)
v.AuxInt = c
v.AddArg(x)
return true
break
}
// match: (XOR x x)
// result: (MOVVconst [0])
@ -8387,30 +8301,17 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v_0.Args[_i0]
v_0_1 := v_0.Args[1^_i0]
if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 {
continue
}
v.reset(OpMIPS64NEGV)
v.AddArg(x)
return true
}
v.reset(OpMIPS64NEGV)
v.AddArg(x)
return true
}
// match: (Select1 (MULVU (MOVVconst [-1]) x))
// result: (NEGV x)
for {
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MULVU {
break
}
x := v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpMIPS64MOVVconst || v_0_0.AuxInt != -1 {
break
}
v.reset(OpMIPS64NEGV)
v.AddArg(x)
return true
break
}
// match: (Select1 (MULVU _ (MOVVconst [0])))
// result: (MOVVconst [0])
@ -8420,29 +8321,16 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool {
break
}
_ = v_0.Args[1]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
break
for _i0 := 0; _i0 <= 1; _i0++ {
v_0_1 := v_0.Args[1^_i0]
if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
continue
}
v.reset(OpMIPS64MOVVconst)
v.AuxInt = 0
return true
}
v.reset(OpMIPS64MOVVconst)
v.AuxInt = 0
return true
}
// match: (Select1 (MULVU (MOVVconst [0]) _))
// result: (MOVVconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MULVU {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpMIPS64MOVVconst || v_0_0.AuxInt != 0 {
break
}
v.reset(OpMIPS64MOVVconst)
v.AuxInt = 0
return true
break
}
// match: (Select1 (MULVU x (MOVVconst [1])))
// result: x
@ -8452,32 +8340,18 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v_0.Args[_i0]
v_0_1 := v_0.Args[1^_i0]
if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 {
continue
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
// match: (Select1 (MULVU (MOVVconst [1]) x))
// result: x
for {
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MULVU {
break
}
x := v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpMIPS64MOVVconst || v_0_0.AuxInt != 1 {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
break
}
// match: (Select1 (MULVU x (MOVVconst [c])))
// cond: isPowerOfTwo(c)
@ -8488,41 +8362,22 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
x := v_0.Args[_i0]
v_0_1 := v_0.Args[1^_i0]
if v_0_1.Op != OpMIPS64MOVVconst {
continue
}
c := v_0_1.AuxInt
if !(isPowerOfTwo(c)) {
continue
}
v.reset(OpMIPS64SLLVconst)
v.AuxInt = log2(c)
v.AddArg(x)
return true
}
c := v_0_1.AuxInt
if !(isPowerOfTwo(c)) {
break
}
v.reset(OpMIPS64SLLVconst)
v.AuxInt = log2(c)
v.AddArg(x)
return true
}
// match: (Select1 (MULVU (MOVVconst [c]) x))
// cond: isPowerOfTwo(c)
// result: (SLLVconst [log2(c)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MULVU {
break
}
x := v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpMIPS64MOVVconst {
break
}
c := v_0_0.AuxInt
if !(isPowerOfTwo(c)) {
break
}
v.reset(OpMIPS64SLLVconst)
v.AuxInt = log2(c)
v.AddArg(x)
return true
break
}
// match: (Select1 (DIVVU x (MOVVconst [1])))
// result: x
@ -8565,9 +8420,6 @@ func rewriteValueMIPS64_OpSelect1_0(v *Value) bool {
v.AddArg(x)
return true
}
return false
}
func rewriteValueMIPS64_OpSelect1_10(v *Value) bool {
// match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d])))
// result: (MOVVconst [c*d])
for {
@ -8576,41 +8428,22 @@ func rewriteValueMIPS64_OpSelect1_10(v *Value) bool {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpMIPS64MOVVconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
v_0_0 := v_0.Args[_i0]
if v_0_0.Op != OpMIPS64MOVVconst {
continue
}
c := v_0_0.AuxInt
v_0_1 := v_0.Args[1^_i0]
if v_0_1.Op != OpMIPS64MOVVconst {
continue
}
d := v_0_1.AuxInt
v.reset(OpMIPS64MOVVconst)
v.AuxInt = c * d
return true
}
c := v_0_0.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
d := v_0_1.AuxInt
v.reset(OpMIPS64MOVVconst)
v.AuxInt = c * d
return true
}
// match: (Select1 (MULVU (MOVVconst [d]) (MOVVconst [c])))
// result: (MOVVconst [c*d])
for {
v_0 := v.Args[0]
if v_0.Op != OpMIPS64MULVU {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpMIPS64MOVVconst {
break
}
d := v_0_0.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
c := v_0_1.AuxInt
v.reset(OpMIPS64MOVVconst)
v.AuxInt = c * d
return true
break
}
// match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d])))
// result: (MOVVconst [c/d])

File diff suppressed because it is too large Load Diff

View File

@ -3195,39 +3195,24 @@ func rewriteValueRISCV64_OpRISCV64ADD_0(v *Value) bool {
// match: (ADD (MOVDconst [off]) ptr)
// cond: is32Bit(off)
// result: (ADDI [off] ptr)
for {
ptr := v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpRISCV64MOVDconst {
break
}
off := v_0.AuxInt
if !(is32Bit(off)) {
break
}
v.reset(OpRISCV64ADDI)
v.AuxInt = off
v.AddArg(ptr)
return true
}
// match: (ADD ptr (MOVDconst [off]))
// cond: is32Bit(off)
// result: (ADDI [off] ptr)
for {
_ = v.Args[1]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpRISCV64MOVDconst {
break
for _i0 := 0; _i0 <= 1; _i0++ {
v_0 := v.Args[_i0]
if v_0.Op != OpRISCV64MOVDconst {
continue
}
off := v_0.AuxInt
ptr := v.Args[1^_i0]
if !(is32Bit(off)) {
continue
}
v.reset(OpRISCV64ADDI)
v.AuxInt = off
v.AddArg(ptr)
return true
}
off := v_1.AuxInt
if !(is32Bit(off)) {
break
}
v.reset(OpRISCV64ADDI)
v.AuxInt = off
v.AddArg(ptr)
return true
break
}
return false
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,17 @@
idea: pack info about value substructure into an int64
all values should be encoded as 1<<x, probably byte by byte
and the hardcoded values should be encoded the same way,
or as 11111111 for "any"
then AND together "have" and "want"
and if zero in any byte (or maybe use popcount < 8?)
then skip because there is a mismatch
possible things to include:
* number of args of v.Args[0], v.Args[1], ...
* op of v.Args[0]%8, v.Args[1]%8, ...
then can also calculate v.Args[0][0], etc at top and share,
also: cut down commutativity

View File

@ -33,7 +33,7 @@ func rewriteValuedec(v *Value) bool {
return false
}
func rewriteValuedec_OpComplexImag_0(v *Value) bool {
// match: (ComplexImag (ComplexMake _ imag))
// match: (ComplexImag (ComplexMake _ imag ))
// result: imag
for {
v_0 := v.Args[0]
@ -49,7 +49,7 @@ func rewriteValuedec_OpComplexImag_0(v *Value) bool {
return false
}
func rewriteValuedec_OpComplexReal_0(v *Value) bool {
// match: (ComplexReal (ComplexMake real _))
// match: (ComplexReal (ComplexMake real _ ))
// result: real
for {
v_0 := v.Args[0]
@ -265,7 +265,7 @@ func rewriteValuedec_OpSliceLen_0(v *Value) bool {
return false
}
func rewriteValuedec_OpSlicePtr_0(v *Value) bool {
// match: (SlicePtr (SliceMake ptr _ _))
// match: (SlicePtr (SliceMake ptr _ _ ))
// result: ptr
for {
v_0 := v.Args[0]

File diff suppressed because it is too large Load Diff