mirror of https://github.com/golang/go.git
cmd/compile: start MIPS64 port of SSA backend
Fib with all int and float types run correctly. *, /, shifts, Zero, Move not implemented yet. No optimization yet. Updates #16359. Change-Id: I4b0412954d5fd4c13a5fcddd8689ed8ac701d345 Reviewed-on: https://go-review.googlesource.com/27404 Reviewed-by: David Chase <drchase@google.com>
This commit is contained in:
parent
e4cae432d6
commit
310a40b4f2
|
|
@ -6,6 +6,7 @@ package mips64
|
|||
|
||||
import (
|
||||
"cmd/compile/internal/gc"
|
||||
"cmd/compile/internal/ssa"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/mips"
|
||||
)
|
||||
|
|
@ -62,6 +63,11 @@ func Main() {
|
|||
gc.Thearch.Doregbits = doregbits
|
||||
gc.Thearch.Regnames = regnames
|
||||
|
||||
gc.Thearch.SSARegToReg = ssaRegToReg
|
||||
gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
|
||||
gc.Thearch.SSAGenValue = ssaGenValue
|
||||
gc.Thearch.SSAGenBlock = ssaGenBlock
|
||||
|
||||
gc.Main()
|
||||
gc.Exit(0)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,603 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mips64
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"cmd/compile/internal/gc"
|
||||
"cmd/compile/internal/ssa"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/mips"
|
||||
)
|
||||
|
||||
// ssaRegToReg maps SSA register numbers (the index into this slice) to
// the machine register numbers used by cmd/internal/obj/mips.
// The order must stay in sync with the register list in
// ssa/gen/MIPS64Ops.go (regNamesMIPS64); registers not participating in
// register allocation (REGTMP, kernel-reserved, REGSB, REGLINK) are
// deliberately absent.
var ssaRegToReg = []int16{
	mips.REG_R0, // constant 0
	mips.REG_R1,
	mips.REG_R2,
	mips.REG_R3,
	mips.REG_R4,
	mips.REG_R5,
	mips.REG_R6,
	mips.REG_R7,
	mips.REG_R8,
	mips.REG_R9,
	mips.REG_R10,
	mips.REG_R11,
	mips.REG_R12,
	mips.REG_R13,
	mips.REG_R14,
	mips.REG_R15,
	mips.REG_R16,
	mips.REG_R17,
	mips.REG_R18,
	mips.REG_R19,
	mips.REG_R20,
	mips.REG_R21,
	mips.REG_R22,
	// R23 = REGTMP not used in regalloc
	mips.REG_R24,
	mips.REG_R25,
	// R26 reserved by kernel
	// R27 reserved by kernel
	// R28 = REGSB not used in regalloc
	mips.REGSP, // R29
	mips.REGG,  // R30
	// R31 = REGLINK not used in regalloc

	mips.REG_F0,
	mips.REG_F1,
	mips.REG_F2,
	mips.REG_F3,
	mips.REG_F4,
	mips.REG_F5,
	mips.REG_F6,
	mips.REG_F7,
	mips.REG_F8,
	mips.REG_F9,
	mips.REG_F10,
	mips.REG_F11,
	mips.REG_F12,
	mips.REG_F13,
	mips.REG_F14,
	mips.REG_F15,
	mips.REG_F16,
	mips.REG_F17,
	mips.REG_F18,
	mips.REG_F19,
	mips.REG_F20,
	mips.REG_F21,
	mips.REG_F22,
	mips.REG_F23,
	mips.REG_F24,
	mips.REG_F25,
	mips.REG_F26,
	mips.REG_F27,
	mips.REG_F28,
	mips.REG_F29,
	mips.REG_F30,
	mips.REG_F31,

	mips.REG_HI, // high bits of multiplication
	mips.REG_LO, // low bits of multiplication

	0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
}
|
||||
|
||||
// Smallest possible faulting page at address zero,
// see ../../../../runtime/mheap.go:/minPhysPageSize
// Loads below this address can be used as implicit nil checks:
// dereferencing them is guaranteed to fault.
const minZeroPage = 4096
|
||||
|
||||
// loadByType returns the load instruction of the given type.
|
||||
func loadByType(t ssa.Type, r int16) obj.As {
|
||||
if mips.REG_F0 <= r && r <= mips.REG_F31 {
|
||||
if t.IsFloat() && t.Size() == 4 { // float32
|
||||
return mips.AMOVF
|
||||
} else { // float64 or integer in FP register
|
||||
return mips.AMOVD
|
||||
}
|
||||
} else {
|
||||
switch t.Size() {
|
||||
case 1:
|
||||
if t.IsSigned() {
|
||||
return mips.AMOVB
|
||||
} else {
|
||||
return mips.AMOVBU
|
||||
}
|
||||
case 2:
|
||||
if t.IsSigned() {
|
||||
return mips.AMOVH
|
||||
} else {
|
||||
return mips.AMOVHU
|
||||
}
|
||||
case 4:
|
||||
if t.IsSigned() {
|
||||
return mips.AMOVW
|
||||
} else {
|
||||
return mips.AMOVWU
|
||||
}
|
||||
case 8:
|
||||
return mips.AMOVV
|
||||
}
|
||||
}
|
||||
panic("bad load type")
|
||||
}
|
||||
|
||||
// storeByType returns the store instruction of the given type.
|
||||
func storeByType(t ssa.Type, r int16) obj.As {
|
||||
if mips.REG_F0 <= r && r <= mips.REG_F31 {
|
||||
if t.IsFloat() && t.Size() == 4 { // float32
|
||||
return mips.AMOVF
|
||||
} else { // float64 or integer in FP register
|
||||
return mips.AMOVD
|
||||
}
|
||||
} else {
|
||||
switch t.Size() {
|
||||
case 1:
|
||||
return mips.AMOVB
|
||||
case 2:
|
||||
return mips.AMOVH
|
||||
case 4:
|
||||
return mips.AMOVW
|
||||
case 8:
|
||||
return mips.AMOVV
|
||||
}
|
||||
}
|
||||
panic("bad store type")
|
||||
}
|
||||
|
||||
// ssaGenValue emits the machine instructions for a single SSA value v.
// It is installed as gc.Thearch.SSAGenValue and is called once per value,
// in schedule order, by the portable code generator.
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	s.SetLineno(v.Line)
	switch v.Op {
	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
		// nothing to do
	case ssa.OpCopy, ssa.OpMIPS64MOVVconvert, ssa.OpMIPS64MOVVreg:
		// Register-to-register move; elided when source and
		// destination registers coincide.
		if v.Type.IsMemory() {
			return
		}
		x := gc.SSARegNum(v.Args[0])
		y := gc.SSARegNum(v)
		if x == y {
			return
		}
		as := mips.AMOVV
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = mips.AMOVF
			case 8:
				as = mips.AMOVD
			default:
				panic("bad float size")
			}
		}
		p := gc.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpMIPS64MOVVnop:
		// Pseudo-op: regalloc must have assigned input and output
		// to the same register; no instruction is emitted.
		if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		// Reload a spilled value from its auto/param stack slot.
		if v.Type.IsFlags() {
			v.Unimplementedf("load flags not implemented: %v", v.LongString())
			return
		}
		r := gc.SSARegNum(v)
		p := gc.Prog(loadByType(v.Type, r))
		n, off := gc.AutoVar(v.Args[0])
		p.From.Type = obj.TYPE_MEM
		p.From.Node = n
		p.From.Sym = gc.Linksym(n.Sym)
		p.From.Offset = off
		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
			p.From.Name = obj.NAME_PARAM
			p.From.Offset += n.Xoffset
		} else {
			p.From.Name = obj.NAME_AUTO
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpPhi:
		gc.CheckLoweredPhi(v)
	case ssa.OpStoreReg:
		// Spill a register to its auto/param stack slot.
		if v.Type.IsFlags() {
			v.Unimplementedf("store flags not implemented: %v", v.LongString())
			return
		}
		r := gc.SSARegNum(v.Args[0])
		p := gc.Prog(storeByType(v.Type, r))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		n, off := gc.AutoVar(v)
		p.To.Type = obj.TYPE_MEM
		p.To.Node = n
		p.To.Sym = gc.Linksym(n.Sym)
		p.To.Offset = off
		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
			p.To.Name = obj.NAME_PARAM
			p.To.Offset += n.Xoffset
		} else {
			p.To.Name = obj.NAME_AUTO
		}
	// Three-operand register ops: out = arg0 OP arg1.
	// Note the obj convention: p.Reg is the FIRST operand,
	// p.From the second.
	case ssa.OpMIPS64ADDV,
		ssa.OpMIPS64SUBV,
		ssa.OpMIPS64AND,
		ssa.OpMIPS64OR,
		ssa.OpMIPS64XOR,
		ssa.OpMIPS64NOR,
		ssa.OpMIPS64SLLV,
		ssa.OpMIPS64SRLV,
		ssa.OpMIPS64SRAV,
		ssa.OpMIPS64ADDF,
		ssa.OpMIPS64ADDD,
		ssa.OpMIPS64SUBF,
		ssa.OpMIPS64SUBD,
		ssa.OpMIPS64MULF,
		ssa.OpMIPS64MULD,
		ssa.OpMIPS64DIVF,
		ssa.OpMIPS64DIVD:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64SGT,
		ssa.OpMIPS64SGTU:
		// SGT/SGTU take operands in the opposite order from the
		// ops above: From is arg0, Reg is arg1.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.Reg = gc.SSARegNum(v.Args[1])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	// Register-immediate ops: out = arg0 OP AuxInt.
	case ssa.OpMIPS64ADDVconst,
		ssa.OpMIPS64SUBVconst,
		ssa.OpMIPS64ANDconst,
		ssa.OpMIPS64ORconst,
		ssa.OpMIPS64XORconst,
		ssa.OpMIPS64NORconst,
		ssa.OpMIPS64SLLVconst,
		ssa.OpMIPS64SRLVconst,
		ssa.OpMIPS64SRAVconst,
		ssa.OpMIPS64SGTconst,
		ssa.OpMIPS64SGTUconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64MULV,
		ssa.OpMIPS64MULVU,
		ssa.OpMIPS64DIVV,
		ssa.OpMIPS64DIVVU:
		// result in hi,lo
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.Reg = gc.SSARegNum(v.Args[0])
	case ssa.OpMIPS64MOVVconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64MOVFconst,
		ssa.OpMIPS64MOVDconst:
		// AuxInt holds the float's bit pattern as an int64.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64CMPEQF,
		ssa.OpMIPS64CMPEQD,
		ssa.OpMIPS64CMPGEF,
		ssa.OpMIPS64CMPGED,
		ssa.OpMIPS64CMPGTF,
		ssa.OpMIPS64CMPGTD:
		// FP compares set the FP condition flag; no destination register.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.Reg = gc.SSARegNum(v.Args[1])
	case ssa.OpMIPS64MOVVaddr:
		p := gc.Prog(mips.AMOVV)
		p.From.Type = obj.TYPE_ADDR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		var wantreg string
		// MOVV $sym+off(base), R
		// the assembler expands it as the following:
		// - base is SP: add constant offset to SP (R29)
		// when constant is large, tmp register (R23) may be used
		// - base is SB: load external address with relocation
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *ssa.ExternSymbol:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *ssa.ArgSymbol, *ssa.AutoSymbol:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVV $off(SP), R
			wantreg = "SP"
			p.From.Reg = mips.REGSP
			p.From.Offset = v.AuxInt
		}
		if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
		}
	case ssa.OpMIPS64MOVBload,
		ssa.OpMIPS64MOVBUload,
		ssa.OpMIPS64MOVHload,
		ssa.OpMIPS64MOVHUload,
		ssa.OpMIPS64MOVWload,
		ssa.OpMIPS64MOVWUload,
		ssa.OpMIPS64MOVVload,
		ssa.OpMIPS64MOVFload,
		ssa.OpMIPS64MOVDload:
		// Load: out = *(arg0 + aux offset).
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64MOVBstore,
		ssa.OpMIPS64MOVHstore,
		ssa.OpMIPS64MOVWstore,
		ssa.OpMIPS64MOVVstore,
		ssa.OpMIPS64MOVFstore,
		ssa.OpMIPS64MOVDstore:
		// Store: *(arg0 + aux offset) = arg1.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)
	case ssa.OpMIPS64MOVBstorezero,
		ssa.OpMIPS64MOVHstorezero,
		ssa.OpMIPS64MOVWstorezero,
		ssa.OpMIPS64MOVVstorezero:
		// Store of constant zero: source is the hardwired-zero register R0.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)
	case ssa.OpMIPS64MOVBreg,
		ssa.OpMIPS64MOVBUreg,
		ssa.OpMIPS64MOVHreg,
		ssa.OpMIPS64MOVHUreg,
		ssa.OpMIPS64MOVWreg,
		ssa.OpMIPS64MOVWUreg:
		// TODO: remove extension if after proper load
		fallthrough
	case ssa.OpMIPS64MOVWF,
		ssa.OpMIPS64MOVWD,
		ssa.OpMIPS64MOVFW,
		ssa.OpMIPS64MOVDW,
		ssa.OpMIPS64MOVVF,
		ssa.OpMIPS64MOVVD,
		ssa.OpMIPS64MOVFV,
		ssa.OpMIPS64MOVDV,
		ssa.OpMIPS64MOVFD,
		ssa.OpMIPS64MOVDF,
		ssa.OpMIPS64NEGF,
		ssa.OpMIPS64NEGD:
		// Unary register-to-register ops (extensions, FP conversions, FP negate).
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64NEGV:
		// SUB from REGZERO
		p := gc.Prog(mips.ASUBVU)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpMIPS64CALLstatic:
		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
			// Deferred calls will appear to be returning to
			// the CALL deferreturn(SB) that we are about to emit.
			// However, the stack trace code will show the line
			// of the instruction byte before the return PC.
			// To avoid that being an unrelated instruction,
			// insert an actual hardware NOP that will have the right line number.
			// This is different from obj.ANOP, which is a virtual no-op
			// that doesn't make it into the instruction stream.
			ginsnop()
		}
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpMIPS64CALLclosure:
		// Indirect call through the function value in arg0.
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpMIPS64CALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpMIPS64CALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpMIPS64CALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpMIPS64LoweredNilCheck:
		// TODO: optimization
		// Issue a load which will fault if arg is nil.
		p := gc.Prog(mips.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = mips.REGZERO
		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			gc.Warnl(v.Line, "generated nil check")
		}
	case ssa.OpVarDef:
		gc.Gvardef(v.Aux.(*gc.Node))
	case ssa.OpVarKill:
		gc.Gvarkill(v.Aux.(*gc.Node))
	case ssa.OpVarLive:
		gc.Gvarlive(v.Aux.(*gc.Node))
	case ssa.OpKeepAlive:
		// Mark the pointer's spill slot live for the liveness analysis;
		// the value must already be spilled at offset 0 of its slot.
		if !v.Args[0].Type.IsPtrShaped() {
			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
		}
		n, off := gc.AutoVar(v.Args[0])
		if n == nil {
			v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
		}
		if off != 0 {
			v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
		}
		gc.Gvarlive(n)
	case ssa.OpMIPS64FPFlagTrue,
		ssa.OpMIPS64FPFlagFalse:
		// Materialize the FP condition flag as a 0/1 integer:
		// MOVV $0, r
		// BFPF 2(PC)
		// MOVV $1, r
		branch := mips.ABFPF
		if v.Op == ssa.OpMIPS64FPFlagFalse {
			branch = mips.ABFPT
		}
		p := gc.Prog(mips.AMOVV)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
		p2 := gc.Prog(branch)
		p2.To.Type = obj.TYPE_BRANCH
		p3 := gc.Prog(mips.AMOVV)
		p3.From.Type = obj.TYPE_CONST
		p3.From.Offset = 1
		p3.To.Type = obj.TYPE_REG
		p3.To.Reg = gc.SSARegNum(v)
		p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land
		gc.Patch(p2, p4)
	case ssa.OpSelect0, ssa.OpSelect1:
		// nothing to do
	case ssa.OpMIPS64LoweredGetClosurePtr:
		// Closure pointer is R22 (mips.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	default:
		v.Unimplementedf("genValue not implemented: %s", v.LongString())
	}
}
|
||||
|
||||
// blockJump maps a conditional block kind to the branch instruction
// that implements it (asm) and the opposite branch (invasm), used when
// code layout makes it cheaper to branch on the inverted condition and
// fall through to the likely successor.
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockMIPS64EQ: {mips.ABEQ, mips.ABNE},
	ssa.BlockMIPS64NE: {mips.ABNE, mips.ABEQ},
	ssa.BlockMIPS64LTZ: {mips.ABLTZ, mips.ABGEZ},
	ssa.BlockMIPS64GEZ: {mips.ABGEZ, mips.ABLTZ},
	ssa.BlockMIPS64LEZ: {mips.ABLEZ, mips.ABGTZ},
	ssa.BlockMIPS64GTZ: {mips.ABGTZ, mips.ABLEZ},
	ssa.BlockMIPS64FPT: {mips.ABFPT, mips.ABFPF},
	ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
}
|
||||
|
||||
// ssaGenBlock emits the control-flow instructions that end block b.
// next is the block that will be laid out immediately after b, so
// branches to next can be elided. Branch targets are recorded in
// s.Branches and patched later, once all blocks have been emitted.
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	s.SetLineno(b.Line)

	switch b.Kind {
	case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
		// Unconditional successor: jump only if it doesn't fall through.
		if b.Succs[0].Block() != next {
			p := gc.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R1:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := gc.Prog(mips.ABNE)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.Reg = mips.REG_R1
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := gc.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit:
		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
	case ssa.BlockRet:
		gc.Prog(obj.ARET)
	case ssa.BlockRetJmp:
		// Tail call: return directly to the named function.
		p := gc.Prog(obj.ARET)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
	case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE,
		ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ,
		ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ,
		ssa.BlockMIPS64FPT, ssa.BlockMIPS64FPF:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			// "Yes" successor falls through: branch on the
			// inverted condition to the "no" successor.
			p = gc.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			// "No" successor falls through: branch on the condition.
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			// Neither successor falls through: conditional branch
			// plus an unconditional jump.
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := gc.Prog(obj.AJMP)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}
		if !b.Control.Type.IsFlags() {
			p.From.Type = obj.TYPE_REG
			p.From.Reg = gc.SSARegNum(b.Control)
		}
	default:
		b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}
|
||||
|
|
@ -194,6 +194,16 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
|
|||
c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
|
||||
c.NeedsFpScratch = true
|
||||
c.hasGReg = true
|
||||
case "mips64", "mips64le":
|
||||
c.IntSize = 8
|
||||
c.PtrSize = 8
|
||||
c.lowerBlock = rewriteBlockMIPS64
|
||||
c.lowerValue = rewriteValueMIPS64
|
||||
c.registers = registersMIPS64[:]
|
||||
c.gpRegMask = gpRegMaskMIPS64
|
||||
c.fpRegMask = fpRegMaskMIPS64
|
||||
c.FPReg = framepointerRegMIPS64
|
||||
c.hasGReg = true
|
||||
default:
|
||||
fe.Unimplementedf(0, "arch %s not implemented", arch)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,215 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
(AddPtr x y) -> (ADDV x y)
(Add64 x y) -> (ADDV x y)
(Add32 x y) -> (ADDV x y)
(Add16 x y) -> (ADDV x y)
(Add8 x y) -> (ADDV x y)
(Add32F x y) -> (ADDF x y)
(Add64F x y) -> (ADDD x y)

(SubPtr x y) -> (SUBV x y)
(Sub64 x y) -> (SUBV x y)
(Sub32 x y) -> (SUBV x y)
(Sub16 x y) -> (SUBV x y)
(Sub8 x y) -> (SUBV x y)
(Sub32F x y) -> (SUBF x y)
(Sub64F x y) -> (SUBD x y)

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8 x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8 x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8 x y) -> (XOR x y)

// unary ops
(Neg64 x) -> (NEGV x)
(Neg32 x) -> (NEGV x)
(Neg16 x) -> (NEGV x)
(Neg8 x) -> (NEGV x)
(Neg32F x) -> (NEGF x)
(Neg64F x) -> (NEGD x)

// bitwise complement: ^x = 0 NOR x
(Com64 x) -> (NOR (MOVVconst [0]) x)
(Com32 x) -> (NOR (MOVVconst [0]) x)
(Com16 x) -> (NOR (MOVVconst [0]) x)
(Com8 x) -> (NOR (MOVVconst [0]) x)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XOR (MOVVconst [1]) (XOR <config.fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XOR (MOVVconst [1]) x)

// constants
(Const64 [val]) -> (MOVVconst [val])
(Const32 [val]) -> (MOVVconst [val])
(Const16 [val]) -> (MOVVconst [val])
(Const8 [val]) -> (MOVVconst [val])
(Const32F [val]) -> (MOVFconst [val])
(Const64F [val]) -> (MOVDconst [val])
(ConstNil) -> (MOVVconst [0])
(ConstBool [b]) -> (MOVVconst [b])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x

// Zero-/Sign-extensions
(ZeroExt8to16 x) -> (MOVBUreg x)
(ZeroExt8to32 x) -> (MOVBUreg x)
(ZeroExt16to32 x) -> (MOVHUreg x)
(ZeroExt8to64 x) -> (MOVBUreg x)
(ZeroExt16to64 x) -> (MOVHUreg x)
(ZeroExt32to64 x) -> (MOVWUreg x)

(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt8to64 x) -> (MOVBreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

// float <-> int conversion
(Cvt32to32F x) -> (MOVWF x)
(Cvt32to64F x) -> (MOVWD x)
(Cvt64to32F x) -> (MOVVF x)
(Cvt64to64F x) -> (MOVVD x)
(Cvt32Fto32 x) -> (MOVFW x)
(Cvt64Fto32 x) -> (MOVDW x)
(Cvt32Fto64 x) -> (MOVFV x)
(Cvt64Fto64 x) -> (MOVDV x)
(Cvt32Fto64F x) -> (MOVFD x)
(Cvt64Fto32F x) -> (MOVDF x)

// comparisons
// There is no equality instruction; x == y is computed as 1 >u (x ^ y)
// (the XOR is zero exactly when the operands are equal), and x != y as
// (x ^ y) >u 0. Sub-word operands must be zero-extended at their own
// width first, since upper register bits are junk.
(Eq8 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) -> (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) -> (SGTU (MOVVconst [1]) (XOR x y))
(Eq32F x y) -> (FPFlagTrue (CMPEQF x y))
(Eq64F x y) -> (FPFlagTrue (CMPEQD x y))

(Neq8 x y) -> (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) -> (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) -> (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) -> (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) -> (SGTU (XOR x y) (MOVVconst [0]))
(Neq32F x y) -> (FPFlagFalse (CMPEQF x y))
(Neq64F x y) -> (FPFlagFalse (CMPEQD x y))

(Less8 x y) -> (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) -> (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) -> (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) -> (SGT y x)
(Less32F x y) -> (FPFlagTrue (CMPGTF y x)) // reverse operands to work around NaN
(Less64F x y) -> (FPFlagTrue (CMPGTD y x)) // reverse operands to work around NaN

(Less8U x y) -> (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) -> (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) -> (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) -> (SGTU y x)

(Leq8 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) -> (XOR (MOVVconst [1]) (SGT x y))
(Leq32F x y) -> (FPFlagTrue (CMPGEF y x)) // reverse operands to work around NaN
(Leq64F x y) -> (FPFlagTrue (CMPGED y x)) // reverse operands to work around NaN

(Leq8U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) -> (XOR (MOVVconst [1]) (SGTU x y))

(Greater8 x y) -> (SGT (SignExt8to64 x) (SignExt8to64 y))
(Greater16 x y) -> (SGT (SignExt16to64 x) (SignExt16to64 y))
(Greater32 x y) -> (SGT (SignExt32to64 x) (SignExt32to64 y))
(Greater64 x y) -> (SGT x y)
(Greater32F x y) -> (FPFlagTrue (CMPGTF x y))
(Greater64F x y) -> (FPFlagTrue (CMPGTD x y))

(Greater8U x y) -> (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Greater16U x y) -> (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Greater32U x y) -> (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Greater64U x y) -> (SGTU x y)

(Geq8 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt8to64 y) (SignExt8to64 x)))
(Geq16 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt16to64 y) (SignExt16to64 x)))
(Geq32 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt32to64 y) (SignExt32to64 x)))
(Geq64 x y) -> (XOR (MOVVconst [1]) (SGT y x))
(Geq32F x y) -> (FPFlagTrue (CMPGEF x y))
(Geq64F x y) -> (FPFlagTrue (CMPGED x y))

(Geq8U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)))
(Geq16U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)))
(Geq32U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)))
(Geq64U x y) -> (XOR (MOVVconst [1]) (SGTU y x))

(OffPtr [off] ptr:(SP)) -> (MOVVaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDVconst [off] ptr)

(Addr {sym} base) -> (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)

// stores
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [4] ptr val mem) && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store [8] ptr val mem) && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)

// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) -> (SGTU len idx)
(IsSliceInBounds idx len) -> (XOR (MOVVconst [1]) (SGTU idx len))

// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Convert x mem) -> (MOVVconvert x mem)

(If cond yes no) -> (NE cond yes no)

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
|
||||
|
|
@ -0,0 +1,314 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import "strings"
|
||||
|
||||
// Notes:
|
||||
// - Integer types live in the low portion of registers. Upper portions are junk.
|
||||
// - Boolean types use the low-order byte of a register. 0=false, 1=true.
|
||||
// Upper bytes are junk.
|
||||
// - *const instructions may use a constant larger than the instruction can encode.
|
||||
// In this case the assembler expands to multiple instructions and uses tmp
|
||||
// register (R23).
|
||||
|
||||
// Suffixes encode the bit width of various instructions.
|
||||
// V (vlong) = 64 bit
|
||||
// WU (word) = 32 bit unsigned
|
||||
// W (word) = 32 bit
|
||||
// H (half word) = 16 bit
|
||||
// HU = 16 bit unsigned
|
||||
// B (byte) = 8 bit
|
||||
// BU = 8 bit unsigned
|
||||
// F (float) = 32 bit float
|
||||
// D (double) = 64 bit float
|
||||
|
||||
// Note: registers not used in regalloc are not included in this list,
|
||||
// so that regmask stays within int64
|
||||
// Be careful when hand coding regmasks.
|
||||
// regNamesMIPS64 lists the registers available to the register allocator,
// in the order that defines their SSA register numbers (and regmask bit
// positions). It must stay in sync with ssaRegToReg in
// cmd/compile/internal/mips64; registers not used in regalloc are omitted
// so the mask fits in an int64.
var regNamesMIPS64 = []string{
	"R0", // constant 0
	"R1",
	"R2",
	"R3",
	"R4",
	"R5",
	"R6",
	"R7",
	"R8",
	"R9",
	"R10",
	"R11",
	"R12",
	"R13",
	"R14",
	"R15",
	"R16",
	"R17",
	"R18",
	"R19",
	"R20",
	"R21",
	"R22",
	// R23 = REGTMP not used in regalloc
	"R24",
	"R25",
	// R26 reserved by kernel
	// R27 reserved by kernel
	// R28 = REGSB not used in regalloc
	"SP", // aka R29
	"g",  // aka R30
	// R31 = REGLINK not used in regalloc

	// NOTE(review): the even F-register comments (0.0, 0.5, 1.0, 2.0)
	// presumably mark registers the old backend loads with those
	// constants — confirm before allocating them freely.
	"F0",
	"F1",
	"F2",
	"F3",
	"F4",
	"F5",
	"F6",
	"F7",
	"F8",
	"F9",
	"F10",
	"F11",
	"F12",
	"F13",
	"F14",
	"F15",
	"F16",
	"F17",
	"F18",
	"F19",
	"F20",
	"F21",
	"F22",
	"F23",
	"F24", // 0.0
	"F25",
	"F26", // 0.5
	"F27",
	"F28", // 1.0
	"F29",
	"F30", // 2.0
	"F31",

	"HI", // high bits of multiplication
	"LO", // low bits of multiplication

	// pseudo-registers
	"SB",
}
|
||||
|
||||
func init() {
|
||||
// Make map from reg names to reg integers.
|
||||
if len(regNamesMIPS64) > 64 {
|
||||
panic("too many registers")
|
||||
}
|
||||
num := map[string]int{}
|
||||
for i, name := range regNamesMIPS64 {
|
||||
num[name] = i
|
||||
}
|
||||
buildReg := func(s string) regMask {
|
||||
m := regMask(0)
|
||||
for _, r := range strings.Split(s, " ") {
|
||||
if n, ok := num[r]; ok {
|
||||
m |= regMask(1) << uint(n)
|
||||
continue
|
||||
}
|
||||
panic("register " + r + " not found")
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Common individual register masks
|
||||
var (
|
||||
gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25")
|
||||
gpg = gp | buildReg("g")
|
||||
gpsp = gp | buildReg("SP")
|
||||
gpspg = gpg | buildReg("SP")
|
||||
gpspsbg = gpspg | buildReg("SB")
|
||||
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F25 F27 F29 F31")
|
||||
lo = buildReg("LO")
|
||||
hi = buildReg("HI")
|
||||
callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
|
||||
)
|
||||
// Common regInfo
|
||||
var (
|
||||
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
|
||||
gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
|
||||
gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
|
||||
gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
|
||||
gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
|
||||
gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
|
||||
gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
|
||||
gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
|
||||
fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
|
||||
fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
|
||||
//fp1flags = regInfo{inputs: []regMask{fp}}
|
||||
//fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
|
||||
//gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
|
||||
fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
|
||||
fp2flags = regInfo{inputs: []regMask{fp, fp}}
|
||||
fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
|
||||
fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
|
||||
readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
|
||||
)
|
||||
ops := []opData{
|
||||
// binary ops
|
||||
{name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
|
||||
{name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt
|
||||
{name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
|
||||
{name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
|
||||
{name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true}, // arg0 * arg1, signed, results hi,lo
|
||||
{name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true}, // arg0 * arg1, unsigned, results hi,lo
|
||||
{name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
|
||||
{name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
|
||||
|
||||
{name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
|
||||
{name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
|
||||
{name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
|
||||
{name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
|
||||
{name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
|
||||
{name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
|
||||
{name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
|
||||
{name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
|
||||
|
||||
{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
|
||||
{name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
|
||||
{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
|
||||
{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt
|
||||
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
|
||||
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
|
||||
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
|
||||
{name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
|
||||
|
||||
{name: "NEGV", argLength: 1, reg: gp11}, // -arg0
|
||||
{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
|
||||
{name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
|
||||
|
||||
// shifts
|
||||
{name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
|
||||
{name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt
|
||||
{name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64
|
||||
{name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned
|
||||
{name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64
|
||||
{name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed
|
||||
|
||||
// comparisons
|
||||
{name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
|
||||
{name: "SGTconst", argLength: 2, reg: gp21, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if arg0 > auxInt (signed), 0 otherwise
|
||||
{name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
|
||||
{name: "SGTUconst", argLength: 2, reg: gp21, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if arg0 > auxInt (unsigned), 0 otherwise
|
||||
|
||||
{name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
|
||||
{name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
|
||||
{name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
|
||||
{name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
|
||||
{name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
|
||||
{name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
|
||||
|
||||
// moves
|
||||
{name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint
|
||||
{name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
|
||||
{name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
|
||||
|
||||
{name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
|
||||
|
||||
{name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64"}, // load from arg0 + auxInt + aux. arg1=mem.
|
||||
|
||||
{name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
|
||||
{name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
|
||||
{name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
|
||||
{name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
|
||||
{name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
|
||||
{name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
|
||||
|
||||
{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
|
||||
{name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem.
|
||||
|
||||
// conversions
|
||||
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
|
||||
{name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
|
||||
{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
|
||||
{name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
|
||||
{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
|
||||
{name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
|
||||
{name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0
|
||||
|
||||
{name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
|
||||
|
||||
{name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
|
||||
{name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
|
||||
{name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32
|
||||
{name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64
|
||||
{name: "MOVFW", argLength: 1, reg: fp11, asm: "MOVFW"}, // float32 -> int32
|
||||
{name: "MOVDW", argLength: 1, reg: fp11, asm: "MOVDW"}, // float64 -> int32
|
||||
{name: "MOVFV", argLength: 1, reg: fp11, asm: "MOVFV"}, // float32 -> int64
|
||||
{name: "MOVDV", argLength: 1, reg: fp11, asm: "MOVDV"}, // float64 -> int64
|
||||
{name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
|
||||
{name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
|
||||
|
||||
// function calls
|
||||
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
|
||||
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
|
||||
{name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
|
||||
{name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
|
||||
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
|
||||
|
||||
// pseudo-ops
|
||||
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil. arg1=mem.
|
||||
|
||||
{name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
|
||||
{name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
|
||||
|
||||
// Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
|
||||
// and sorts it to the very beginning of the block to prevent other
|
||||
// use of R22 (mips.REGCTXT, the closure pointer)
|
||||
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}},
|
||||
|
||||
// MOVDconvert converts between pointers and integers.
|
||||
// We have a special op for this so as to not confuse GC
|
||||
// (particularly stack maps). It takes a memory arg so it
|
||||
// gets correctly ordered with respect to GC safepoints.
|
||||
// arg0=ptr/int arg1=mem, output=int/ptr
|
||||
{name: "MOVVconvert", argLength: 2, reg: gp11, asm: "MOVV"},
|
||||
}
|
||||
|
||||
blocks := []blockData{
|
||||
{name: "EQ"},
|
||||
{name: "NE"},
|
||||
{name: "LTZ"}, // < 0
|
||||
{name: "LEZ"}, // <= 0
|
||||
{name: "GTZ"}, // > 0
|
||||
{name: "GEZ"}, // >= 0
|
||||
{name: "FPT"}, // FP flag is true
|
||||
{name: "FPF"}, // FP flag is false
|
||||
}
|
||||
|
||||
archs = append(archs, arch{
|
||||
name: "MIPS64",
|
||||
pkg: "cmd/internal/obj/mips",
|
||||
genfile: "../../mips64/ssa.go",
|
||||
ops: ops,
|
||||
blocks: blocks,
|
||||
regnames: regNamesMIPS64,
|
||||
gpregmask: gp,
|
||||
fpregmask: fp,
|
||||
framepointerreg: -1, // not used
|
||||
})
|
||||
}
|
||||
|
|
@ -234,6 +234,7 @@ func genRules(arch arch) {
|
|||
if strings.Contains(s[1], "(") {
|
||||
genMatch0(w, arch, s[1], "v", map[string]struct{}{}, false, rule.loc)
|
||||
} else {
|
||||
fmt.Fprintf(w, "_ = v\n") // in case we don't use v
|
||||
fmt.Fprintf(w, "%s := b.Control\n", s[1])
|
||||
}
|
||||
}
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -13711,6 +13711,7 @@ func rewriteBlock386(b *Block) bool {
|
|||
// result: (NE (TESTB cond cond) yes no)
|
||||
for {
|
||||
v := b.Control
|
||||
_ = v
|
||||
cond := b.Control
|
||||
yes := b.Succs[0]
|
||||
no := b.Succs[1]
|
||||
|
|
|
|||
|
|
@ -18896,6 +18896,7 @@ func rewriteBlockAMD64(b *Block) bool {
|
|||
// result: (NE (TESTB cond cond) yes no)
|
||||
for {
|
||||
v := b.Control
|
||||
_ = v
|
||||
cond := b.Control
|
||||
yes := b.Succs[0]
|
||||
no := b.Succs[1]
|
||||
|
|
|
|||
|
|
@ -17305,6 +17305,7 @@ func rewriteBlockARM(b *Block) bool {
|
|||
// result: (NE (CMPconst [0] cond) yes no)
|
||||
for {
|
||||
v := b.Control
|
||||
_ = v
|
||||
cond := b.Control
|
||||
yes := b.Succs[0]
|
||||
no := b.Succs[1]
|
||||
|
|
|
|||
|
|
@ -15200,6 +15200,7 @@ func rewriteBlockARM64(b *Block) bool {
|
|||
// result: (NE (CMPconst [0] cond) yes no)
|
||||
for {
|
||||
v := b.Control
|
||||
_ = v
|
||||
cond := b.Control
|
||||
yes := b.Succs[0]
|
||||
no := b.Succs[1]
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -6834,6 +6834,7 @@ func rewriteBlockPPC64(b *Block) bool {
|
|||
// result: (NE (CMPWconst [0] cond) yes no)
|
||||
for {
|
||||
v := b.Control
|
||||
_ = v
|
||||
cond := b.Control
|
||||
yes := b.Succs[0]
|
||||
no := b.Succs[1]
|
||||
|
|
|
|||
Loading…
Reference in New Issue