mirror of https://github.com/golang/go.git
[dev.regabi] cmd/compile: refactor temp construction in walk
This CL adds a few new helper functions for constructing and initializing temporary variables during walk. Passes toolstash -cmp. Change-Id: I54965d992cd8dfef7cb7dc92a17c88372e52a0d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/284224 Trust: Matthew Dempsky <mdempsky@google.com> Run-TryBot: Matthew Dempsky <mdempsky@google.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
This commit is contained in:
parent
78e5aabcdb
commit
ba0e8a92fa
|
|
@ -277,10 +277,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
||||||
// Allocate hmap on stack.
|
// Allocate hmap on stack.
|
||||||
|
|
||||||
// var hv hmap
|
// var hv hmap
|
||||||
hv := typecheck.Temp(hmapType)
|
|
||||||
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil)))
|
|
||||||
// h = &hv
|
// h = &hv
|
||||||
h = typecheck.NodAddr(hv)
|
h = stackTempAddr(init, hmapType)
|
||||||
|
|
||||||
// Allocate one bucket pointed to by hmap.buckets on stack if hint
|
// Allocate one bucket pointed to by hmap.buckets on stack if hint
|
||||||
// is not larger than BUCKETSIZE. In case hint is larger than
|
// is not larger than BUCKETSIZE. In case hint is larger than
|
||||||
|
|
@ -303,11 +301,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
||||||
nif.Likely = true
|
nif.Likely = true
|
||||||
|
|
||||||
// var bv bmap
|
// var bv bmap
|
||||||
bv := typecheck.Temp(reflectdata.MapBucketType(t))
|
|
||||||
nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
|
|
||||||
|
|
||||||
// b = &bv
|
// b = &bv
|
||||||
b := typecheck.NodAddr(bv)
|
b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))
|
||||||
|
|
||||||
// h.buckets = b
|
// h.buckets = b
|
||||||
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
|
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
|
||||||
|
|
@ -509,9 +504,7 @@ func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
|
||||||
if t.Size() >= ir.MaxImplicitStackVarSize {
|
if t.Size() >= ir.MaxImplicitStackVarSize {
|
||||||
base.Fatalf("large ONEW with EscNone: %v", n)
|
base.Fatalf("large ONEW with EscNone: %v", n)
|
||||||
}
|
}
|
||||||
r := typecheck.Temp(t)
|
return stackTempAddr(init, t)
|
||||||
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp
|
|
||||||
return typecheck.Expr(typecheck.NodAddr(r))
|
|
||||||
}
|
}
|
||||||
types.CalcSize(t)
|
types.CalcSize(t)
|
||||||
n.MarkNonNil()
|
n.MarkNonNil()
|
||||||
|
|
|
||||||
|
|
@ -344,30 +344,14 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
|
||||||
if !types.Identical(t, x.Type()) {
|
if !types.Identical(t, x.Type()) {
|
||||||
panic("dotdotdot base type does not match order's assigned type")
|
panic("dotdotdot base type does not match order's assigned type")
|
||||||
}
|
}
|
||||||
|
a = initStackTemp(init, x, vstat != nil)
|
||||||
if vstat == nil {
|
|
||||||
a = ir.NewAssignStmt(base.Pos, x, nil)
|
|
||||||
a = typecheck.Stmt(a)
|
|
||||||
init.Append(a) // zero new temp
|
|
||||||
} else {
|
|
||||||
// Declare that we're about to initialize all of x.
|
|
||||||
// (Which happens at the *vauto = vstat below.)
|
|
||||||
init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x))
|
|
||||||
}
|
|
||||||
|
|
||||||
a = typecheck.NodAddr(x)
|
|
||||||
} else if n.Esc() == ir.EscNone {
|
} else if n.Esc() == ir.EscNone {
|
||||||
a = typecheck.Temp(t)
|
|
||||||
if vstat == nil {
|
if vstat == nil {
|
||||||
a = ir.NewAssignStmt(base.Pos, typecheck.Temp(t), nil)
|
// TODO(mdempsky): Remove this useless temporary.
|
||||||
a = typecheck.Stmt(a)
|
// It's only needed to keep toolstash happy.
|
||||||
init.Append(a) // zero new temp
|
typecheck.Temp(t)
|
||||||
a = a.(*ir.AssignStmt).X
|
|
||||||
} else {
|
|
||||||
init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a))
|
|
||||||
}
|
}
|
||||||
|
a = initStackTemp(init, typecheck.Temp(t), vstat != nil)
|
||||||
a = typecheck.NodAddr(a)
|
|
||||||
} else {
|
} else {
|
||||||
a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
|
a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
|
||||||
}
|
}
|
||||||
|
|
@ -550,9 +534,8 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
|
||||||
|
|
||||||
var r ir.Node
|
var r ir.Node
|
||||||
if n.Prealloc != nil {
|
if n.Prealloc != nil {
|
||||||
// n.Right is stack temporary used as backing store.
|
// n.Prealloc is stack temporary used as backing store.
|
||||||
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Prealloc, nil)) // zero backing store, just in case (#18410)
|
r = initStackTemp(init, n.Prealloc, false)
|
||||||
r = typecheck.NodAddr(n.Prealloc)
|
|
||||||
} else {
|
} else {
|
||||||
r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
|
r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
|
||||||
r.SetEsc(n.Esc())
|
r.SetEsc(n.Esc())
|
||||||
|
|
|
||||||
|
|
@ -198,8 +198,7 @@ func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
||||||
a := typecheck.NodNil()
|
a := typecheck.NodNil()
|
||||||
if n.Esc() == ir.EscNone {
|
if n.Esc() == ir.EscNone {
|
||||||
// Create temporary buffer for string on stack.
|
// Create temporary buffer for string on stack.
|
||||||
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
|
a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
|
||||||
a = typecheck.NodAddr(typecheck.Temp(t))
|
|
||||||
}
|
}
|
||||||
if n.Op() == ir.ORUNES2STR {
|
if n.Op() == ir.ORUNES2STR {
|
||||||
// slicerunetostring(*[32]byte, []rune) string
|
// slicerunetostring(*[32]byte, []rune) string
|
||||||
|
|
@ -229,8 +228,7 @@ func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
||||||
func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
||||||
a := typecheck.NodNil()
|
a := typecheck.NodNil()
|
||||||
if n.Esc() == ir.EscNone {
|
if n.Esc() == ir.EscNone {
|
||||||
t := types.NewArray(types.Types[types.TUINT8], 4)
|
a = stackBufAddr(4, types.Types[types.TUINT8])
|
||||||
a = typecheck.NodAddr(typecheck.Temp(t))
|
|
||||||
}
|
}
|
||||||
// intstring(*[4]byte, rune)
|
// intstring(*[4]byte, rune)
|
||||||
return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
|
return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
|
||||||
|
|
@ -246,7 +244,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
||||||
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
|
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
|
||||||
var a ir.Node
|
var a ir.Node
|
||||||
if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
|
if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
|
||||||
a = typecheck.NodAddr(typecheck.Temp(t))
|
a = stackBufAddr(t.NumElem(), t.Elem())
|
||||||
} else {
|
} else {
|
||||||
types.CalcSize(t)
|
types.CalcSize(t)
|
||||||
a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil)
|
a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil)
|
||||||
|
|
@ -273,8 +271,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
||||||
a := typecheck.NodNil()
|
a := typecheck.NodNil()
|
||||||
if n.Esc() == ir.EscNone {
|
if n.Esc() == ir.EscNone {
|
||||||
// Create temporary buffer for slice on stack.
|
// Create temporary buffer for slice on stack.
|
||||||
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
|
a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
|
||||||
a = typecheck.NodAddr(typecheck.Temp(t))
|
|
||||||
}
|
}
|
||||||
// stringtoslicebyte(*32[byte], string) []byte
|
// stringtoslicebyte(*32[byte], string) []byte
|
||||||
return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
|
return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
|
||||||
|
|
@ -298,8 +295,7 @@ func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
|
||||||
a := typecheck.NodNil()
|
a := typecheck.NodNil()
|
||||||
if n.Esc() == ir.EscNone {
|
if n.Esc() == ir.EscNone {
|
||||||
// Create temporary buffer for slice on stack.
|
// Create temporary buffer for slice on stack.
|
||||||
t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
|
a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32])
|
||||||
a = typecheck.NodAddr(typecheck.Temp(t))
|
|
||||||
}
|
}
|
||||||
// stringtoslicerune(*[32]rune, string) []rune
|
// stringtoslicerune(*[32]rune, string) []rune
|
||||||
return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
|
return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
|
||||||
|
|
|
||||||
|
|
@ -441,8 +441,7 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
|
||||||
// Don't allocate the buffer if the result won't fit.
|
// Don't allocate the buffer if the result won't fit.
|
||||||
if sz < tmpstringbufsize {
|
if sz < tmpstringbufsize {
|
||||||
// Create temporary buffer for result string on stack.
|
// Create temporary buffer for result string on stack.
|
||||||
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
|
buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
|
||||||
buf = typecheck.NodAddr(typecheck.Temp(t))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,47 @@
|
||||||
|
// Copyright 2021 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package walk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmd/compile/internal/base"
|
||||||
|
"cmd/compile/internal/ir"
|
||||||
|
"cmd/compile/internal/typecheck"
|
||||||
|
"cmd/compile/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// initStackTemp appends statements to init to initialize the given
|
||||||
|
// temporary variable, and then returns the expression &tmp. If vardef
|
||||||
|
// is true, then the variable is initialized with OVARDEF, and the
|
||||||
|
// caller must ensure the variable is later assigned before use;
|
||||||
|
// otherwise, it's zero initialized.
|
||||||
|
//
|
||||||
|
// TODO(mdempsky): Change callers to provide tmp's initial value,
|
||||||
|
// rather than just vardef, to make this safer/easier to use.
|
||||||
|
func initStackTemp(init *ir.Nodes, tmp *ir.Name, vardef bool) *ir.AddrExpr {
|
||||||
|
if vardef {
|
||||||
|
init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, tmp))
|
||||||
|
} else {
|
||||||
|
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, nil))
|
||||||
|
}
|
||||||
|
return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stackTempAddr returns the expression &tmp, where tmp is a newly
|
||||||
|
// allocated temporary variable of the given type. Statements to
|
||||||
|
// zero-initialize tmp are appended to init.
|
||||||
|
func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
|
||||||
|
return initStackTemp(init, typecheck.Temp(typ), false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stackBufAddr returns the expression &tmp, where tmp is a newly
|
||||||
|
// allocated temporary variable of type [len]elem. This variable is
|
||||||
|
// initialized, and elem must not contain pointers.
|
||||||
|
func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
|
||||||
|
if elem.HasPointers() {
|
||||||
|
base.FatalfAt(base.Pos, "%v has pointers", elem)
|
||||||
|
}
|
||||||
|
tmp := typecheck.Temp(types.NewArray(elem, len))
|
||||||
|
return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
|
||||||
|
}
|
||||||
Loading…
Reference in New Issue