diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index b855f4a174..3dc6b98123 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -371,6 +371,7 @@ func (e *Escape) stmt(n *Node) { e.stmts(n.Right.Ninit) e.call(e.addrs(n.List), n.Right, nil) case ORETURN: + e.curfn.Func.numReturns++ results := e.curfn.Type.Results().FieldSlice() for i, v := range n.List.Slice() { e.assign(asNode(results[i].Nname), v, "return", n) @@ -378,6 +379,16 @@ func (e *Escape) stmt(n *Node) { case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER: e.call(nil, n, nil) case OGO, ODEFER: + if n.Op == ODEFER { + e.curfn.Func.SetHasDefer(true) + e.curfn.Func.numDefers++ + if e.curfn.Func.numDefers > maxOpenDefers { + // Don't allow open defers if there are more than + // 8 defers in the function, since we use a single + // byte to record active defers. + e.curfn.Func.SetOpenCodedDeferDisallowed(true) + } + } e.stmts(n.Left.Ninit) e.call(nil, n.Left, n) @@ -872,8 +883,13 @@ func (e *Escape) augmentParamHole(k EscHole, where *Node) EscHole { // non-transient location to avoid arguments from being // transiently allocated. 
if where.Op == ODEFER && e.loopDepth == 1 { - where.Esc = EscNever // force stack allocation of defer record (see ssa.go) + // force stack allocation of defer record, unless open-coded + // defers are used (see ssa.go) + where.Esc = EscNever return e.later(k) + } else if where.Op == ODEFER { + // If any defer occurs in a loop, open-coded defers cannot be used + e.curfn.Func.SetOpenCodedDeferDisallowed(true) } return e.heapHole() diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 05aac9ecb2..8806386707 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -52,6 +52,7 @@ var ( Debug_typecheckinl int Debug_gendwarfinl int Debug_softfloat int + Debug_defer int ) // Debug arguments. @@ -81,6 +82,7 @@ var debugtab = []struct { {"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl}, {"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl}, {"softfloat", "force compiler to emit soft-float code", &Debug_softfloat}, + {"defer", "print information about defer compilation", &Debug_defer}, } const debugHelpHeader = `usage: -d arg[,arg]* and arg is [=] diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index be13b27892..83371fabf5 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -294,6 +294,9 @@ func addGCLocals() { } ggloblsym(x, int32(len(x.P)), attr) } + if x := s.Func.OpenCodedDeferInfo; x != nil { + ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + } } } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 9e3dca25c8..00a24f2dff 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -338,6 +338,7 @@ func deferstruct(stksize int64) *types.Type { makefield("siz", types.Types[TUINT32]), makefield("started", types.Types[TBOOL]), makefield("heap", 
types.Types[TBOOL]), + makefield("openDefer", types.Types[TBOOL]), makefield("sp", types.Types[TUINTPTR]), makefield("pc", types.Types[TUINTPTR]), // Note: the types here don't really matter. Defer structures @@ -346,6 +347,9 @@ func deferstruct(stksize int64) *types.Type { makefield("fn", types.Types[TUINTPTR]), makefield("_panic", types.Types[TUINTPTR]), makefield("link", types.Types[TUINTPTR]), + makefield("framepc", types.Types[TUINTPTR]), + makefield("varp", types.Types[TUINTPTR]), + makefield("fd", types.Types[TUINTPTR]), makefield("args", argtype), } diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go index f4725c0eb2..ce4a216c2e 100644 --- a/src/cmd/compile/internal/gc/sizeof_test.go +++ b/src/cmd/compile/internal/gc/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 116, 208}, + {Func{}, 124, 224}, {Name{}, 32, 56}, {Param{}, 24, 48}, {Node{}, 76, 128}, diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index dd8dacd149..200cca1063 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -29,6 +29,10 @@ var ssaDumpStdout bool // whether to dump to stdout var ssaDumpCFG string // generate CFGs for these phases const ssaDumpFile = "ssa.html" +// The max number of defers in a function using open-coded defers. We enforce this +// limit because the deferBits bitmask is currently a single byte (to minimize code size) +const maxOpenDefers = 8 + // ssaDumpInlined holds all inlined functions when ssaDump contains a function name. var ssaDumpInlined []*Node @@ -165,6 +169,107 @@ func initssaconfig() { SigPanic = sysfunc("sigpanic") } +// getParam returns the Field of ith param of node n (which is a +// function/method/interface call), where the receiver of a method call is +// considered as the 0th parameter. 
This does not include the receiver of an +// interface call. +func getParam(n *Node, i int) *types.Field { + t := n.Left.Type + if n.Op == OCALLMETH { + if i == 0 { + return t.Recv() + } + return t.Params().Field(i - 1) + } + return t.Params().Field(i) +} + +// dvarint writes a varint v to the funcdata in symbol x and returns the new offset +func dvarint(x *obj.LSym, off int, v int64) int { + if v < 0 || v > 1e9 { + panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v)) + } + if v < 1<<7 { + return duint8(x, off, uint8(v)) + } + off = duint8(x, off, uint8((v&127)|128)) + if v < 1<<14 { + return duint8(x, off, uint8(v>>7)) + } + off = duint8(x, off, uint8(((v>>7)&127)|128)) + if v < 1<<21 { + return duint8(x, off, uint8(v>>14)) + } + off = duint8(x, off, uint8(((v>>14)&127)|128)) + if v < 1<<28 { + return duint8(x, off, uint8(v>>21)) + } + off = duint8(x, off, uint8(((v>>21)&127)|128)) + return duint8(x, off, uint8(v>>28)) +} + +// emitOpenDeferInfo emits FUNCDATA information about the defers in a function +// that is using open-coded defers. This funcdata is used to determine the active +// defers in a function and execute those defers during panic processing. +// +// The funcdata is all encoded in varints (since values will almost always be less +// than 128, but stack offsets could potentially be up to 2Gbyte). All "locations" +// for stack variables are specified as the number of bytes below varp for their +// starting address. 
The format is: +// +// - Max total argument size among all the defers +// - Location of the deferBits variable +// - Number of defers in the function +// - Information about each defer call, in reverse order of appearance in the function: +// - Total argument size of the call +// - Location of the closure value to call +// - 1 or 0 to indicate if there is a receiver for the call +// - If yes, then the location of the receiver value +// - Number of arguments +// - Information about each argument +// - Location of the stored defer argument in this function's frame +// - Size of the argument +// - Offset of where argument should be placed in the args frame when making call +func emitOpenDeferInfo(s state) { + x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer") + s.curfn.Func.lsym.Func.OpenCodedDeferInfo = x + off := 0 + + // Compute maxargsize (max size of arguments for all defers) + // first, so we can output it first to the funcdata + var maxargsize int64 + for i := len(s.opendefers) - 1; i >= 0; i-- { + r := s.opendefers[i] + argsize := r.n.Left.Type.ArgWidth() + if argsize > maxargsize { + maxargsize = argsize + } + } + off = dvarint(x, off, maxargsize) + off = dvarint(x, off, -s.deferBitsTemp.Xoffset) + off = dvarint(x, off, int64(len(s.opendefers))) + + // Write in reverse-order, for ease of running in that order at runtime + for i := len(s.opendefers) - 1; i >= 0; i-- { + r := s.opendefers[i] + off = dvarint(x, off, r.n.Left.Type.ArgWidth()) + off = dvarint(x, off, -r.closureNode.Xoffset) + if r.rcvrNode != nil { + off = dvarint(x, off, 1) + off = dvarint(x, off, -r.rcvrNode.Xoffset) + } else { + off = dvarint(x, off, 0) + } + off = dvarint(x, off, int64(len(r.argNodes))) + for j, arg := range r.argNodes { + f := getParam(r.n, j) + off = dvarint(x, off, -arg.Xoffset) + off = dvarint(x, off, f.Type.Size()) + off = dvarint(x, off, f.Offset) + } + } +} + // buildssa builds an SSA function for fn. 
// worker indicates which of the backend workers is doing the processing. func buildssa(fn *Node, worker int) *ssa.Func { @@ -227,11 +332,48 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.labeledNodes = map[*Node]*ssaLabel{} s.fwdVars = map[*Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) + + s.hasOpenDefers = Debug['N'] == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() + if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 { + // Skip doing open defers if there is any extra exit code (likely + // copying heap-allocated return values or race detection), since + // we will not generate that code in the case of the extra + // deferreturn/ret segment. + s.hasOpenDefers = false + } + if s.hasOpenDefers && + s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 { + // Since we are generating defer calls at every exit for + // open-coded defers, skip doing open-coded defers if there are + // too many returns (especially if there are multiple defers). + // Open-coded defers are most important for improving performance + // for smaller functions (which don't have many returns). + s.hasOpenDefers = false + } + s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR]) s.startBlock(s.f.Entry) s.vars[&memVar] = s.startmem + if s.hasOpenDefers { + // Create the deferBits variable and stack slot. deferBits is a + // bitmask showing which of the open-coded defers in this function + // have been activated. 
+ deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8]) + s.deferBitsTemp = deferBitsTemp + // For this value, AuxInt is initialized to zero by default + startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8]) + s.vars[&deferBitsVar] = startDeferBits + s.deferBitsAddr = s.addr(deferBitsTemp, false) + s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits) + // Make sure that the deferBits stack slot is kept alive (for use + // by panics) and stores to deferBits are not eliminated, even if + // all checking code on deferBits in the function exit can be + // eliminated, because the defer statements were all + // unconditional. + s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false) + } // Generate addresses of local declarations s.decladdrs = map[*Node]*ssa.Value{} @@ -287,6 +429,11 @@ func buildssa(fn *Node, worker int) *ssa.Func { // Main call to ssa package to compile function ssa.Compile(s.f) + + if s.hasOpenDefers { + emitOpenDeferInfo(s) + } + return s.f } @@ -375,6 +522,29 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) { } } +// Information about each open-coded defer. +type openDeferInfo struct { + // The ODEFER node representing the function call of the defer + n *Node + // If defer call is closure call, the address of the argtmp where the + // closure is stored. + closure *ssa.Value + // The node representing the argtmp where the closure is stored - used for + // function, method, or interface call, to store a closure that panic + // processing can use for this defer. + closureNode *Node + // If defer call is interface call, the address of the argtmp where the + // receiver is stored + rcvr *ssa.Value + // The node representing the argtmp where the receiver is stored + rcvrNode *Node + // The addresses of the argtmps where the evaluated arguments of the defer + // function call are stored. 
+ argVals []*ssa.Value + // The nodes representing the argtmps where the args of the defer are stored + argNodes []*Node +} + type state struct { // configuration (arch) information config *ssa.Config @@ -416,6 +586,9 @@ type state struct { startmem *ssa.Value sp *ssa.Value sb *ssa.Value + // value representing address of where deferBits autotmp is stored + deferBitsAddr *ssa.Value + deferBitsTemp *Node // line number stack. The current line number is top of stack line []src.XPos @@ -432,6 +605,19 @@ type state struct { cgoUnsafeArgs bool hasdefer bool // whether the function contains a defer statement softFloat bool + hasOpenDefers bool // whether we are doing open-coded defers + + // If doing open-coded defers, list of info about the defer calls in + // scanning order. Hence, at exit we should run these defers in reverse + // order of this list + opendefers []*openDeferInfo + // For open-coded defers, this is the beginning and end blocks of the last + // defer exit code that we have generated so far. We use these to share + // code between exits if the shareDeferExits option (disabled by default) + // is on. 
+ lastDeferExit *ssa.Block // Entry block of last defer exit code we generated + lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated + lastDeferCount int // Number of defers encountered at that point } type funcLine struct { @@ -469,12 +655,13 @@ var ( memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}} // dummy nodes for temporary variables - ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}} - lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}} - newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}} - capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}} - typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}} - okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}} + ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}} + lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}} + newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}} + capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}} + typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}} + okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}} + deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}} ) // startBlock sets the current block we're generating code in to b. 
@@ -865,11 +1052,27 @@ func (s *state) stmt(n *Node) { } } case ODEFER: - d := callDefer - if n.Esc == EscNever { - d = callDeferStack + if Debug_defer > 0 { + var defertype string + if s.hasOpenDefers { + defertype = "open-coded" + } else if n.Esc == EscNever { + defertype = "stack-allocated" + } else { + defertype = "heap-allocated" + } + Warnl(n.Pos, "defer: %s defer in function %s", + defertype, s.curfn.funcname()) + } + if s.hasOpenDefers { + s.openDeferRecord(n.Left) + } else { + d := callDefer + if n.Esc == EscNever { + d = callDeferStack + } + s.call(n.Left, d) } - s.call(n.Left, d) case OGO: s.call(n.Left, callGo) @@ -1286,12 +1489,28 @@ func (s *state) stmt(n *Node) { } } +// If true, share as many open-coded defer exits as possible (with the downside of +// worse line-number information) +const shareDeferExits = false + // exit processes any code that needs to be generated just before returning. // It returns a BlockRet block that ends the control flow. Its control value // will be set to the final memory state. func (s *state) exit() *ssa.Block { if s.hasdefer { - s.rtcall(Deferreturn, true, nil) + if s.hasOpenDefers { + if shareDeferExits && s.lastDeferExit != nil && len(s.opendefers) == s.lastDeferCount { + if s.curBlock.Kind != ssa.BlockPlain { + panic("Block for an exit should be BlockPlain") + } + s.curBlock.AddEdgeTo(s.lastDeferExit) + s.endBlock() + return s.lastDeferFinalBlock + } + s.openDeferExit() + } else { + s.rtcall(Deferreturn, true, nil) + } } // Run exit code. 
Typically, this code copies heap-allocated PPARAMOUT @@ -1314,6 +1533,9 @@ func (s *state) exit() *ssa.Block { b := s.endBlock() b.Kind = ssa.BlockRet b.SetControl(m) + if s.hasdefer && s.hasOpenDefers { + s.lastDeferFinalBlock = b + } return b } @@ -3764,6 +3986,230 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value { return args } +// openDeferRecord adds code to evaluate and store the args for an open-code defer +// call, and records info about the defer, so we can generate proper code on the +// exit paths. n is the sub-node of the defer node that is the actual function +// call. We will also record funcdata information on where the args are stored +// (as well as the deferBits variable), and this will enable us to run the proper +// defer calls during panics. +func (s *state) openDeferRecord(n *Node) { + index := len(s.opendefers) + + // Do any needed expression evaluation for the args (including the + // receiver, if any). This may be evaluating something like 'autotmp_3 = + // once.mutex'. Such a statement will create a mapping in s.vars[] from + // the autotmp name to the evaluated SSA arg value, but won't do any + // stores to the stack. + s.stmtList(n.List) + + args := []*ssa.Value{} + argNodes := []*Node{} + + opendefer := &openDeferInfo{ + n: n, + } + fn := n.Left + if n.Op == OCALLFUNC { + // We must always store the function value in a stack slot for the + // runtime panic code to use. But in the defer exit code, we will + // call the function directly if it is a static function. + closureVal := s.expr(fn) + closure := s.openDeferSave(fn, fn.Type, closureVal) + opendefer.closureNode = closure.Aux.(*Node) + if !(fn.Op == ONAME && fn.Class() == PFUNC) { + opendefer.closure = closure + } + } else if n.Op == OCALLMETH { + if fn.Op != ODOTMETH { + Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) + } + closureVal := s.getMethodClosure(fn) + // We must always store the function value in a stack slot for the + // runtime panic code to use. 
But in the defer exit code, we will + // call the method directly. + closure := s.openDeferSave(fn, fn.Type, closureVal) + opendefer.closureNode = closure.Aux.(*Node) + } else { + if fn.Op != ODOTINTER { + Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) + } + closure, rcvr := s.getClosureAndRcvr(fn) + opendefer.closure = s.openDeferSave(fn, closure.Type, closure) + // Important to get the receiver type correct, so it is recognized + // as a pointer for GC purposes. + opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr) + opendefer.closureNode = opendefer.closure.Aux.(*Node) + opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node) + } + for _, argn := range n.Rlist.Slice() { + v := s.openDeferSave(argn, argn.Type, s.expr(argn)) + args = append(args, v) + argNodes = append(argNodes, v.Aux.(*Node)) + } + opendefer.argVals = args + opendefer.argNodes = argNodes + s.opendefers = append(s.opendefers, opendefer) + + // Update deferBits only after evaluation and storage to stack of + // args/receiver/interface is successful. + bitvalue := s.constInt8(types.Types[TUINT8], 1<= 0; i-- { + r := s.opendefers[i] + bCond := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + + deferBits := s.variable(&deferBitsVar, types.Types[TUINT8]) + // Generate code to check if the bit associated with the current + // defer is set. + bitval := s.constInt8(types.Types[TUINT8], 1<" { return FuncID_wrapper diff --git a/src/cmd/internal/objabi/stack.go b/src/cmd/internal/objabi/stack.go index 62ab0398a6..7320dbf365 100644 --- a/src/cmd/internal/objabi/stack.go +++ b/src/cmd/internal/objabi/stack.go @@ -18,7 +18,7 @@ const ( ) // Initialize StackGuard and StackLimit according to target system. 
-var StackGuard = 880*stackGuardMultiplier() + StackSystem +var StackGuard = 896*stackGuardMultiplier() + StackSystem var StackLimit = StackGuard - StackSystem - StackSmall // stackGuardMultiplier returns a multiplier to apply to the default diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index d9904f9093..3d7b42b187 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -11,6 +11,7 @@ import ( "cmd/internal/sys" "cmd/link/internal/sym" "encoding/binary" + "fmt" "log" "os" "path/filepath" @@ -255,13 +256,23 @@ func (ctxt *Link) pclntab() { } if r.Type.IsDirectJump() && r.Sym != nil && r.Sym.Name == "runtime.deferreturn" { if ctxt.Arch.Family == sys.Wasm { - deferreturn = lastWasmAddr + deferreturn = lastWasmAddr - 1 } else { // Note: the relocation target is in the call instruction, but // is not necessarily the whole instruction (for instance, on // x86 the relocation applies to bytes [1:5] of the 5 byte call // instruction). 
deferreturn = uint32(r.Off) + switch ctxt.Arch.Family { + case sys.AMD64, sys.I386, sys.MIPS, sys.MIPS64, sys.RISCV64: + deferreturn-- + case sys.PPC64, sys.ARM, sys.ARM64: + // no change + case sys.S390X: + deferreturn -= 2 + default: + panic(fmt.Sprint("Unhandled architecture:", ctxt.Arch.Family)) + } } break // only need one } diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index d686a8a476..b4236a5239 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -498,7 +498,8 @@ func (ctxt *Link) symtab() { case strings.HasPrefix(s.Name, "gcargs."), strings.HasPrefix(s.Name, "gclocals."), strings.HasPrefix(s.Name, "gclocals·"), - strings.HasPrefix(s.Name, "inltree."): + strings.HasPrefix(s.Name, "inltree."), + strings.HasSuffix(s.Name, ".opendefer"): s.Type = sym.SGOFUNC s.Attr |= sym.AttrNotInSymbolTable s.Outer = symgofunc diff --git a/src/runtime/callers_test.go b/src/runtime/callers_test.go index fcfd10deff..cd1421ba91 100644 --- a/src/runtime/callers_test.go +++ b/src/runtime/callers_test.go @@ -5,6 +5,7 @@ package runtime_test import ( + "internal/race" "reflect" "runtime" "strings" @@ -12,19 +13,19 @@ import ( ) func f1(pan bool) []uintptr { - return f2(pan) // line 15 + return f2(pan) // line 16 } func f2(pan bool) []uintptr { - return f3(pan) // line 19 + return f3(pan) // line 20 } func f3(pan bool) []uintptr { if pan { - panic("f3") // line 24 + panic("f3") // line 25 } ret := make([]uintptr, 20) - return ret[:runtime.Callers(0, ret)] // line 27 + return ret[:runtime.Callers(0, ret)] // line 28 } func testCallers(t *testing.T, pcs []uintptr, pan bool) { @@ -48,16 +49,16 @@ func testCallers(t *testing.T, pcs []uintptr, pan bool) { var f3Line int if pan { - f3Line = 24 + f3Line = 25 } else { - f3Line = 27 + f3Line = 28 } want := []struct { name string line int }{ - {"f1", 15}, - {"f2", 19}, + {"f1", 16}, + {"f2", 20}, {"f3", f3Line}, } for _, w := range want { @@ -188,3 +189,36 @@ func 
TestCallersDivZeroPanic(t *testing.T) { t.Fatal("did not see divide-by-sizer panic") } } + +// This test will have a slightly different callstack if non-open-coded defers are +// called (e.g. if race checks enabled), because of a difference in the way the +// defer function is invoked. +func TestCallersDeferNilFuncPanic(t *testing.T) { + if race.Enabled { + t.Skip("skipping TestCallersDeferNilFuncPanic under race detector") + } + // Make sure we don't have any extra frames on the stack (due to + // open-coded defer processing) + state := 1 + want := []string{"runtime.Callers", "runtime_test.TestCallersDeferNilFuncPanic.func1", + "runtime.gopanic", "runtime.panicmem", "runtime.sigpanic", + "runtime_test.TestCallersDeferNilFuncPanic"} + + defer func() { + if r := recover(); r == nil { + t.Fatal("did not panic") + } + pcs := make([]uintptr, 20) + pcs = pcs[:runtime.Callers(0, pcs)] + testCallersEqual(t, pcs, want) + if state == 1 { + t.Fatal("nil defer func panicked at defer time rather than function exit time") + } + + }() + var f func() + defer f() + // Use the value of 'state' to make sure nil defer func f causes panic at + // function exit, rather than at the defer statement. 
+ state = 2 +} diff --git a/src/runtime/defer_test.go b/src/runtime/defer_test.go index 0d3e8e9d63..d830fc591f 100644 --- a/src/runtime/defer_test.go +++ b/src/runtime/defer_test.go @@ -15,11 +15,11 @@ import ( // unconditional panic (hence no return from the function) func TestUnconditionalPanic(t *testing.T) { defer func() { - if recover() == nil { + if recover() != "testUnconditional" { t.Fatal("expected unconditional panic") } }() - panic("panic should be recovered") + panic("testUnconditional") } var glob int = 3 @@ -30,7 +30,7 @@ func TestOpenAndNonOpenDefers(t *testing.T) { for { // Non-open defer because in a loop defer func(n int) { - if recover() == nil { + if recover() != "testNonOpenDefer" { t.Fatal("expected testNonOpen panic") } }(3) @@ -45,7 +45,7 @@ func TestOpenAndNonOpenDefers(t *testing.T) { //go:noinline func testOpen(t *testing.T, arg int) { defer func(n int) { - if recover() == nil { + if recover() != "testOpenDefer" { t.Fatal("expected testOpen panic") } }(4) @@ -61,7 +61,7 @@ func TestNonOpenAndOpenDefers(t *testing.T) { for { // Non-open defer because in a loop defer func(n int) { - if recover() == nil { + if recover() != "testNonOpenDefer" { t.Fatal("expected testNonOpen panic") } }(3) @@ -80,7 +80,7 @@ func TestConditionalDefers(t *testing.T) { list = make([]int, 0, 10) defer func() { - if recover() == nil { + if recover() != "testConditional" { t.Fatal("expected panic") } want := []int{4, 2, 1} @@ -106,7 +106,7 @@ func testConditionalDefers(n int) { defer doappend(4) } } - panic("test") + panic("testConditional") } // Test that there is no compile-time or run-time error if an open-coded defer @@ -174,3 +174,52 @@ func TestRecoverMatching(t *testing.T) { }() panic("panic1") } + +type nonSSAable [128]byte + +type bigStruct struct { + x, y, z, w, p, q int64 +} + +func mknonSSAable() nonSSAable { + globint1++ + return nonSSAable{0, 0, 0, 0, 5} +} + +var globint1, globint2 int + +//go:noinline +func sideeffect(n int64) int64 { + globint2++ + 
return n +} + +// Test that nonSSAable arguments to defer are handled correctly and only evaluated once. +func TestNonSSAableArgs(t *testing.T) { + globint1 = 0 + globint2 = 0 + var save1 byte + var save2 int64 + + defer func() { + if globint1 != 1 { + t.Fatal(fmt.Sprintf("globint1: wanted: 1, got %v", globint1)) + } + if save1 != 5 { + t.Fatal(fmt.Sprintf("save1: wanted: 5, got %v", save1)) + } + if globint2 != 1 { + t.Fatal(fmt.Sprintf("globint2: wanted: 1, got %v", globint2)) + } + if save2 != 2 { + t.Fatal(fmt.Sprintf("save2: wanted: 2, got %v", save2)) + } + }() + + defer func(n nonSSAable) { + save1 = n[4] + }(mknonSSAable()) + defer func(b bigStruct) { + save2 = b.y + }(bigStruct{1, 2, 3, 4, 5, sideeffect(6)}) +} diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h index d9a35c51a0..0fb50ddfba 100644 --- a/src/runtime/funcdata.h +++ b/src/runtime/funcdata.h @@ -17,6 +17,7 @@ #define FUNCDATA_RegPointerMaps 2 #define FUNCDATA_StackObjects 3 #define FUNCDATA_InlTree 4 +#define FUNCDATA_OpenCodedDeferInfo 5 /* info for func with open-coded defers */ // Pseudo-assembly statements. diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 5f33cd7c0c..291a660b3e 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -10,6 +10,19 @@ import ( "unsafe" ) +// We have two different ways of doing defers. The older way involves creating a +// defer record at the time that a defer statement is executing and adding it to a +// defer chain. This chain is inspected by the deferreturn call at all function +// exits in order to run the appropriate defer calls. A cheaper way (which we call +// open-coded defers) is used for functions in which no defer statements occur in +// loops. In that case, we simply store the defer function/arg information into +// specific stack slots at the point of each defer statement, as well as setting a +// bit in a bitmask. 
At each function exit, we add inline code to directly make +// the appropriate defer calls based on the bitmask and fn/arg information stored +// on the stack. During panic/Goexit processing, the appropriate defer calls are +// made using extra funcdata info that indicates the exact stack slots that +// contain the bitmask and defer fn/args. + // Check to make sure we can really generate a panic. If the panic // was generated from the runtime, or from inside malloc, then convert // to a throw of msg. @@ -263,19 +276,24 @@ func deferprocStack(d *_defer) { // are initialized here. d.started = false d.heap = false + d.openDefer = false d.sp = getcallersp() d.pc = getcallerpc() + d.framepc = 0 + d.varp = 0 // The lines below implement: // d.panic = nil + // d.fp = nil // d.link = gp._defer // gp._defer = d - // But without write barriers. The first two are writes to + // But without write barriers. The first three are writes to // the stack so they don't need a write barrier, and furthermore // are to uninitialized memory, so they must not use a write barrier. - // The third write does not require a write barrier because we + // The fourth write does not require a write barrier because we // explicitly mark all the defer structures, so we don't need to // keep track of pointers to them with a write barrier. *(*uintptr)(unsafe.Pointer(&d._panic)) = 0 + *(*uintptr)(unsafe.Pointer(&d.fd)) = 0 *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer)) *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d)) @@ -463,8 +481,12 @@ func freedefer(d *_defer) { // started causing a nosplit stack overflow via typedmemmove. d.siz = 0 d.started = false + d.openDefer = false d.sp = 0 d.pc = 0 + d.framepc = 0 + d.varp = 0 + d.fd = nil // d._panic and d.fn must be nil already. // If not, we would have called freedeferpanic or freedeferfn above, // both of which throw. 
@@ -493,9 +515,11 @@ func freedeferfn() { // to have been called by the caller of deferreturn at the point // just before deferreturn was called. The effect is that deferreturn // is called again and again until there are no more deferred functions. -// Cannot split the stack because we reuse the caller's frame to -// call the deferred function. - +// +// Declared as nosplit, because the function should not be preempted once we start +// modifying the caller's frame in order to reuse the frame to call the deferred +// function. +// // The single argument isn't actually used - it just has its address // taken so it can be matched against pending defers. //go:nosplit @@ -509,6 +533,15 @@ func deferreturn(arg0 uintptr) { if d.sp != sp { return } + if d.openDefer { + done := runOpenDeferFrame(gp, d) + if !done { + throw("unfinished open-coded defers in deferreturn") + } + gp._defer = d.link + freedefer(d) + return + } // Moving arguments around. // @@ -544,6 +577,8 @@ func Goexit() { // This code is similar to gopanic, see that implementation // for detailed comments. gp := getg() + addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp())) + for { d := gp._defer if d == nil { @@ -554,13 +589,26 @@ func Goexit() { d._panic.aborted = true d._panic = nil } - d.fn = nil - gp._defer = d.link - freedefer(d) - continue + if !d.openDefer { + d.fn = nil + gp._defer = d.link + freedefer(d) + continue + } } d.started = true - reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + if d.openDefer { + done := runOpenDeferFrame(gp, d) + if !done { + // We should always run all defers in the frame, + // since there is no panic associated with this + // defer that can be recovered. 
+ throw("unfinished open-coded defers in Goexit") + } + addOneOpenDeferFrame(gp, 0, nil) + } else { + reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + } if gp._defer != d { throw("bad defer entry in Goexit") } @@ -607,6 +655,182 @@ func printpanics(p *_panic) { print("\n") } +// addOneOpenDeferFrame scans the stack for the first frame (if any) with +// open-coded defers and if it finds one, adds a single record to the defer chain +// for that frame. If sp is non-nil, it starts the stack scan from the frame +// specified by sp. If sp is nil, it uses the sp from the current defer record +// (which has just been finished). Hence, it continues the stack scan from the +// frame of the defer that just finished. It skips any frame that already has an +// open-coded _defer record, which would have been created from a previous +// (unrecovered) panic. +// +// Note: All entries of the defer chain (including this new open-coded entry) have +// their pointers (including sp) adjusted properly if the stack moves while +// running deferred functions. Also, it is safe to pass in the sp arg (which is +// the direct result of calling getcallersp()), because all pointer variables +// (including arguments) are adjusted as needed during stack copies. 
+func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) { + var prevDefer *_defer + if sp == nil { + prevDefer = gp._defer + pc = prevDefer.framepc + sp = unsafe.Pointer(prevDefer.sp) + } + systemstack(func() { + gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff, + func(frame *stkframe, unused unsafe.Pointer) bool { + if prevDefer != nil && prevDefer.sp == frame.sp { + // Skip the frame for the previous defer that + // we just finished (and was used to set + // where we restarted the stack scan) + return true + } + f := frame.fn + fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo) + if fd == nil { + return true + } + // Insert the open defer record in the + // chain, in order sorted by sp. + d := gp._defer + var prev *_defer + for d != nil { + dsp := d.sp + if frame.sp < dsp { + break + } + if frame.sp == dsp { + if !d.openDefer { + throw("duplicated defer entry") + } + return true + } + prev = d + d = d.link + } + if frame.fn.deferreturn == 0 { + throw("missing deferreturn") + } + + maxargsize, _ := readvarintUnsafe(fd) + d1 := newdefer(int32(maxargsize)) + d1.openDefer = true + d1._panic = nil + // These are the pc/sp to set after we've + // run a defer in this frame that did a + // recover. We return to a special + // deferreturn that runs any remaining + // defers and then returns from the + // function. + d1.pc = frame.fn.entry + uintptr(frame.fn.deferreturn) + d1.varp = frame.varp + d1.fd = fd + // Save the SP/PC associated with current frame, + // so we can continue stack trace later if needed. + d1.framepc = frame.pc + d1.sp = frame.sp + d1.link = d + if prev == nil { + gp._defer = d1 + } else { + prev.link = d1 + } + // Stop stack scanning after adding one open defer record + return false + }, + nil, 0) + }) +} + +// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the +// uint32 and a pointer to the byte following the varint. 
+// +// There is a similar function runtime.readvarint, which takes a slice of bytes, +// rather than an unsafe pointer. These functions are duplicated, because one of +// the two use cases for the functions would get slower if the functions were +// combined. +func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) { + var r uint32 + var shift int + for { + b := *(*uint8)((unsafe.Pointer(fd))) + fd = add(fd, unsafe.Sizeof(b)) + if b < 128 { + return r + uint32(b)<<shift, fd + } + r += ((uint32(b) &^ 128) << shift) + shift += 7 + if shift > 28 { + panic("Bad varint") + } + } +} + +// runOpenDeferFrame runs the active open-coded defers in the frame specified by +// d. It normally processes all active defers in the frame, but stops immediately +// if a defer does a successful recover. It returns true if there are no +// remaining defers to run in the frame. +func runOpenDeferFrame(gp *g, d *_defer) bool { + done := true + fd := d.fd + + // Skip the maxargsize + _, fd = readvarintUnsafe(fd) + deferBitsOffset, fd := readvarintUnsafe(fd) + nDefers, fd := readvarintUnsafe(fd) + deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) + + for i := int(nDefers) - 1; i >= 0; i-- { + // read the funcdata info for this defer + var argWidth, closureOffset, hasRcvrOffset, rcvrOffset, nArgs uint32 + argWidth, fd = readvarintUnsafe(fd) + closureOffset, fd = readvarintUnsafe(fd) + hasRcvrOffset, fd = readvarintUnsafe(fd) + if hasRcvrOffset > 0 { + rcvrOffset, fd = readvarintUnsafe(fd) + } + nArgs, fd = readvarintUnsafe(fd) + if deferBits&(1<<i) == 0 { + // Defer not active: still consume this defer's arg + // records so fd stays in sync with the funcdata stream. + for j := uint32(0); j < nArgs; j++ { + _, fd = readvarintUnsafe(fd) + _, fd = readvarintUnsafe(fd) + _, fd = readvarintUnsafe(fd) + } + continue + } + closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset))) + d.fn = closure + deferArgs := deferArgs(d) + // If there is an interface receiver or method receiver, it is + // described/included as the first arg. + if hasRcvrOffset > 0 { + *(*unsafe.Pointer)(deferArgs) = *(*unsafe.Pointer)((unsafe.Pointer)((d.varp - uintptr(rcvrOffset)))) + } + for j := uint32(0); j < nArgs; j++ { + var argOffset, argLen, argCallOffset uint32 + argOffset, fd = readvarintUnsafe(fd) + argLen, fd = readvarintUnsafe(fd) + argCallOffset, fd = readvarintUnsafe(fd) + memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)), + unsafe.Pointer(d.varp-uintptr(argOffset)), + uintptr(argLen)) + } + deferBits = deferBits &^ (1 << i) + 
*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits + if d._panic != nil { + d._panic.argp = unsafe.Pointer(getargp(0)) + } + reflectcall(nil, unsafe.Pointer(closure), deferArgs, argWidth, argWidth) + d.fn = nil + // These args are just a copy, so can be cleared immediately + memclrNoHeapPointers(deferArgs, uintptr(argWidth)) + if d._panic != nil && d._panic.recovered { + done = deferBits == 0 + break + } + } + + return done +} + // The implementation of the predeclared function panic. func gopanic(e interface{}) { gp := getg() @@ -646,6 +870,10 @@ func gopanic(e interface{}) { atomic.Xadd(&runningPanicDefers, 1) + // By calculating getcallerpc/getcallersp here, we avoid scanning the + // gopanic frame (stack scanning is slow...) + addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp())) + for { d := gp._defer if d == nil { @@ -659,10 +887,16 @@ func gopanic(e interface{}) { d._panic.aborted = true } d._panic = nil - d.fn = nil - gp._defer = d.link - freedefer(d) - continue + if !d.openDefer { + // For open-coded defers, we need to process the + // defer again, in case there are any other defers + // to call in the frame (not including the defer + // call that caused the panic). + d.fn = nil + gp._defer = d.link + freedefer(d) + continue + } } // Mark defer as started, but keep on list, so that traceback @@ -675,8 +909,16 @@ func gopanic(e interface{}) { // will find d in the list and will mark d._panic (this panic) aborted. d._panic = (*_panic)(noescape(unsafe.Pointer(&p))) - p.argp = unsafe.Pointer(getargp(0)) - reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + done := true + if d.openDefer { + done = runOpenDeferFrame(gp, d) + if done && !d._panic.recovered { + addOneOpenDeferFrame(gp, 0, nil) + } + } else { + p.argp = unsafe.Pointer(getargp(0)) + reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + } p.argp = nil // reflectcall did not panic. Remove d. 
@@ -684,18 +926,52 @@ func gopanic(e interface{}) { throw("bad defer entry in panic") } d._panic = nil - d.fn = nil - gp._defer = d.link // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic //GC() pc := d.pc sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy - freedefer(d) + if done { + d.fn = nil + gp._defer = d.link + freedefer(d) + } if p.recovered { atomic.Xadd(&runningPanicDefers, -1) + if done { + // Remove any remaining non-started, open-coded defer + // entry after a recover (there's at most one, if we just + // ran a non-open-coded defer), since the entry will + // become out-dated and the defer will be executed + // normally. + d := gp._defer + var prev *_defer + for d != nil { + if d.openDefer { + if d.started { + // This defer is started but we + // are in the middle of a + // defer-panic-recover inside of + // it, so don't remove it or any + // further defer entries + break + } + if prev == nil { + gp._defer = d.link + } else { + prev.link = d.link + } + freedefer(d) + break + } else { + prev = d + d = d.link + } + } + } + gp._panic = p.link // Aborted panics are marked but remain on the g.panic list. // Remove them from the list. @@ -803,7 +1079,7 @@ func recovery(gp *g) { } // Make the deferproc for this d return again, - // this time returning 1. The calling function will + // this time returning 1. The calling function will // jump to the standard return epilogue. gp.sched.sp = sp gp.sched.pc = pc diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index dd399e00a6..20813d09eb 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -701,7 +701,7 @@ type _func struct { nameoff int32 // function name args int32 // in/out args size - deferreturn uint32 // offset of a deferreturn block from entry, if any. + deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
pcsp int32 pcfile int32 @@ -774,7 +774,7 @@ func extendRandom(r []byte, n int) { } // A _defer holds an entry on the list of deferred calls. -// If you add a field here, add code to clear it in freedefer. +// If you add a field here, add code to clear it in freedefer and deferProcStack // This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct // and cmd/compile/internal/gc/ssa.go:(*state).call. // Some defers will be allocated on the stack and some on the heap. @@ -785,11 +785,27 @@ type _defer struct { siz int32 // includes both arguments and results started bool heap bool - sp uintptr // sp at time of defer - pc uintptr - fn *funcval - _panic *_panic // panic that is running defer - link *_defer + // openDefer indicates that this _defer is for a frame with open-coded + // defers. We have only one defer record for the entire frame (which may + // currently have 0, 1, or more defers active). + openDefer bool + sp uintptr // sp at time of defer + pc uintptr // pc at time of defer + fn *funcval + _panic *_panic // panic that is running defer + link *_defer + + // If openDefer is true, the fields below record values about the stack + // frame and associated function that has the open-coded defer(s). sp + // above will be the sp for the frame, and pc will be address of the + // deferreturn call in the function. + fd unsafe.Pointer // funcdata for the function associated with the frame + varp uintptr // value of varp for the stack frame + // framepc is the current pc associated with the stack frame. Together, + // with sp above (which is the sp associated with the stack frame), + // framepc/sp can be used as pc/sp pair to continue a stack trace via + // gentraceback(). + framepc uintptr } // A _panic holds information about an active panic. 
diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go index ab7a03b2d1..a1946d3fcb 100644 --- a/src/runtime/runtime_test.go +++ b/src/runtime/runtime_test.go @@ -89,7 +89,7 @@ func BenchmarkDefer(b *testing.B) { } func defer1() { - defer func(x, y, z int) { + func(x, y, z int) { if recover() != nil || x != 1 || y != 2 || z != 3 { panic("bad recover") } diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 271b24c58a..d72582e82e 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -91,7 +91,7 @@ const ( // The stack guard is a pointer this many bytes above the // bottom of the stack. - _StackGuard = 880*sys.StackGuardMultiplier + _StackSystem + _StackGuard = 896*sys.StackGuardMultiplier + _StackSystem // After a stack split check the SP is allowed to be this // many bytes below the stack guard. This saves an instruction @@ -736,6 +736,8 @@ func adjustdefers(gp *g, adjinfo *adjustinfo) { adjustpointer(adjinfo, unsafe.Pointer(&d.sp)) adjustpointer(adjinfo, unsafe.Pointer(&d._panic)) adjustpointer(adjinfo, unsafe.Pointer(&d.link)) + adjustpointer(adjinfo, unsafe.Pointer(&d.varp)) + adjustpointer(adjinfo, unsafe.Pointer(&d.fd)) } // Adjust defer argument blocks the same way we adjust active stack frames. 
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index df6e02f62a..e99b8cf669 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -216,11 +216,12 @@ const ( _PCDATA_StackMapIndex = 1 _PCDATA_InlTreeIndex = 2 - _FUNCDATA_ArgsPointerMaps = 0 - _FUNCDATA_LocalsPointerMaps = 1 - _FUNCDATA_RegPointerMaps = 2 - _FUNCDATA_StackObjects = 3 - _FUNCDATA_InlTree = 4 + _FUNCDATA_ArgsPointerMaps = 0 + _FUNCDATA_LocalsPointerMaps = 1 + _FUNCDATA_RegPointerMaps = 2 + _FUNCDATA_StackObjects = 3 + _FUNCDATA_InlTree = 4 + _FUNCDATA_OpenCodedDeferInfo = 5 _ArgsSizeUnknown = -0x80000000 ) diff --git a/test/defererrcheck.go b/test/defererrcheck.go new file mode 100644 index 0000000000..5b90e58738 --- /dev/null +++ b/test/defererrcheck.go @@ -0,0 +1,86 @@ +// errorcheck -0 -l -d=defer + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// check that open-coded defers are used in expected situations + +package main + +import "fmt" + +var glob = 3 + +func f1() { + + for i := 0; i < 10; i++ { + fmt.Println("loop") + } + defer func() { // ERROR "open-coded defer in function f1" + fmt.Println("defer") + }() +} + +func f2() { + for { + defer func() { // ERROR "heap-allocated defer in function f2" + fmt.Println("defer1") + }() + if glob > 2 { + break + } + } + defer func() { // ERROR "stack-allocated defer in function f2" + fmt.Println("defer2") + }() +} + +func f3() { + defer func() { // ERROR "stack-allocated defer in function f3" + fmt.Println("defer2") + }() + for { + defer func() { // ERROR "heap-allocated defer in function f3" + fmt.Println("defer1") + }() + if glob > 2 { + break + } + } +} + +func f4() { + defer func() { // ERROR "open-coded defer in function f4" + fmt.Println("defer") + }() +label: + fmt.Println("goto loop") + if glob > 2 { + goto label + } +} + +func f5() { +label: + fmt.Println("goto loop") + defer func() { // ERROR 
"heap-allocated defer in function f5" + fmt.Println("defer") + }() + if glob > 2 { + goto label + } +} + +func f6() { +label: + fmt.Println("goto loop") + if glob > 2 { + goto label + } + // The current analysis doesn't end a backward goto loop, so this defer is + // considered to be inside a loop + defer func() { // ERROR "heap-allocated defer in function f6" + fmt.Println("defer") + }() +} diff --git a/test/live.go b/test/live.go index b6e6d93f5f..32c397f4a9 100644 --- a/test/live.go +++ b/test/live.go @@ -367,16 +367,19 @@ func f24() { m2[[2]string{"x", "y"}] = nil } -// defer should not cause spurious ambiguously live variables - +// Non-open-coded defers should not cause autotmps. (Open-coded defers do create extra autotmps). func f25(b bool) { - defer g25() + for i := 0; i < 2; i++ { + // Put in loop to make sure defer is not open-coded + defer g25() + } if b { return } var x string x = g14() printstring(x) + return } func g25() @@ -417,7 +420,8 @@ func f27defer(b bool) { defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{" } defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{" - printnl() + printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+" + return // ERROR "live at call to call27: .autotmp_[0-9]+" } // and newproc (go) escapes to the heap @@ -687,12 +691,12 @@ type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "li // In particular, at printint r must be live. 
func f41(p, q *int) (r *int) { // ERROR "live at entry to f41: p q$" r = p - defer func() { // ERROR "live at call to deferprocStack: q r$" "live at call to deferreturn: r$" + defer func() { recover() }() - printint(0) // ERROR "live at call to printint: q r$" + printint(0) // ERROR "live at call to printint: q r .autotmp_[0-9]+$" r = q - return // ERROR "live at call to deferreturn: r$" + return // ERROR "live at call to f41.func1: r .autotmp_[0-9]+$" } func f42() { diff --git a/test/nosplit.go b/test/nosplit.go index 266e6077b1..3b7e605999 100644 --- a/test/nosplit.go +++ b/test/nosplit.go @@ -309,17 +309,17 @@ TestCases: name := m[1] size, _ := strconv.Atoi(m[2]) - // The limit was originally 128 but is now 752 (880-128). + // The limit was originally 128 but is now 768 (896-128). // Instead of rewriting the test cases above, adjust // the first stack frame to use up the extra bytes. if i == 0 { - size += (880 - 128) - 128 + size += (896 - 128) - 128 // Noopt builds have a larger stackguard. // See ../src/cmd/dist/buildruntime.go:stackGuardMultiplier // This increase is included in objabi.StackGuard for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") { if s == "-N" { - size += 880 + size += 896 } } }