mirror of https://github.com/golang/go.git
cmd/compile: refactor onebitwalktype1
The existing logic advanced the offset by each type's width and then had to undo that adjustment in the array and struct handling code. It is much simpler to compute derived offsets only within the array and struct cases. While here, include a short-circuit for zero-width arrays to fix a pedantic compiler failure case.

Passes toolstash-check.

Fixes #20739.

Change-Id: I98af9bb512a33e3efe82b8bf1803199edb480640
Reviewed-on: https://go-review.googlesource.com/64471
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
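To make the shape of the refactor concrete, here is a minimal, self-contained sketch in the spirit of the new code. It uses toy stand-ins (a typ struct, a map used as a bit set, an assumed 8-byte pointer size) rather than the compiler's *types.Type and bvec, so every name in it is illustrative rather than the compiler's API: the offset is passed by value, only the array and struct cases derive new offsets, and zero-width array elements short-circuit the walk.

package main

import "fmt"

const ptrSize = 8 // assumed pointer size for the illustration

// typ is a toy stand-in for the compiler's *types.Type.
type typ struct {
	width  int64   // size in bytes
	ptr    bool    // true if the type is a single pointer word
	elem   *typ    // array element type, if this is an array
	n      int64   // array length
	fields []field // struct fields, if this is a struct
}

type field struct {
	off int64 // field offset within the struct
	t   *typ
}

// walk records which pointer-sized words of t, laid out at byte offset off,
// hold pointers. The offset is a plain value; only the array and struct
// cases compute derived offsets.
func walk(t *typ, off int64, bits map[int64]bool) {
	switch {
	case t.ptr:
		bits[off/ptrSize] = true
	case t.elem != nil: // array
		if t.elem.width == 0 {
			return // zero-width elements: nothing to mark, skip the loop
		}
		for i := int64(0); i < t.n; i++ {
			walk(t.elem, off, bits)
			off += t.elem.width
		}
	case t.fields != nil: // struct
		for _, f := range t.fields {
			walk(f.t, off+f.off, bits)
		}
	}
}

func main() {
	p := &typ{width: ptrSize, ptr: true}
	arr := &typ{width: 2 * ptrSize, elem: p, n: 2}                         // like [2]*T
	s := &typ{width: 3 * ptrSize, fields: []field{{0, p}, {ptrSize, arr}}} // like struct { x *T; a [2]*T }
	bits := map[int64]bool{}
	walk(s, 0, bits)
	fmt.Println(bits) // map[0:true 1:true 2:true]
}

Passing the offset by value is what removes the pointer-based *xoffset bookkeeping that the old code had to undo for arrays and structs.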
commit 39983cf491
parent e06a64a476
@@ -42,21 +42,17 @@ func emitptrargsmap() {
 	}
 	off := duint32(lsym, 0, uint32(nbitmap))
 	off = duint32(lsym, off, uint32(bv.n))
-	var xoffset int64
 	if Curfn.IsMethod() {
-		xoffset = 0
-		onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv)
+		onebitwalktype1(Curfn.Type.Recvs(), 0, bv)
 	}
 
 	if Curfn.Type.NumParams() > 0 {
-		xoffset = 0
-		onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
+		onebitwalktype1(Curfn.Type.Params(), 0, bv)
 	}
 	off = dbvec(lsym, off, bv)
 
 	if Curfn.Type.NumResults() > 0 {
-		xoffset = 0
-		onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
+		onebitwalktype1(Curfn.Type.Results(), 0, bv)
 		off = dbvec(lsym, off, bv)
 	}
 
@@ -355,85 +355,63 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
 // and then simply copied into bv at the correct offset on future calls with
 // the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1
 // accounts for 40% of the 6g execution time.
-func onebitwalktype1(t *types.Type, xoffset *int64, bv bvec) {
-	if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
+func onebitwalktype1(t *types.Type, off int64, bv bvec) {
+	if t.Align > 0 && off&int64(t.Align-1) != 0 {
 		Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
 	}
 
 	switch t.Etype {
-	case TINT8,
-		TUINT8,
-		TINT16,
-		TUINT16,
-		TINT32,
-		TUINT32,
-		TINT64,
-		TUINT64,
-		TINT,
-		TUINT,
-		TUINTPTR,
-		TBOOL,
-		TFLOAT32,
-		TFLOAT64,
-		TCOMPLEX64,
-		TCOMPLEX128:
-		*xoffset += t.Width
+	case TINT8, TUINT8, TINT16, TUINT16,
+		TINT32, TUINT32, TINT64, TUINT64,
+		TINT, TUINT, TUINTPTR, TBOOL,
+		TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
 
-	case TPTR32,
-		TPTR64,
-		TUNSAFEPTR,
-		TFUNC,
-		TCHAN,
-		TMAP:
-		if *xoffset&int64(Widthptr-1) != 0 {
+	case TPTR32, TPTR64, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+		if off&int64(Widthptr-1) != 0 {
 			Fatalf("onebitwalktype1: invalid alignment, %v", t)
 		}
-		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer
-		*xoffset += t.Width
+		bv.Set(int32(off / int64(Widthptr))) // pointer
 
 	case TSTRING:
 		// struct { byte *str; intgo len; }
-		if *xoffset&int64(Widthptr-1) != 0 {
+		if off&int64(Widthptr-1) != 0 {
 			Fatalf("onebitwalktype1: invalid alignment, %v", t)
 		}
-		bv.Set(int32(*xoffset / int64(Widthptr))) //pointer in first slot
-		*xoffset += t.Width
+		bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
 
 	case TINTER:
 		// struct { Itab *tab; void *data; }
 		// or, when isnilinter(t)==true:
 		// struct { Type *type; void *data; }
-		if *xoffset&int64(Widthptr-1) != 0 {
+		if off&int64(Widthptr-1) != 0 {
 			Fatalf("onebitwalktype1: invalid alignment, %v", t)
 		}
-		bv.Set(int32(*xoffset / int64(Widthptr)))   // pointer in first slot
-		bv.Set(int32(*xoffset/int64(Widthptr) + 1)) // pointer in second slot
-		*xoffset += t.Width
+		bv.Set(int32(off / int64(Widthptr)))   // pointer in first slot
+		bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
 
 	case TSLICE:
 		// struct { byte *array; uintgo len; uintgo cap; }
-		if *xoffset&int64(Widthptr-1) != 0 {
+		if off&int64(Widthptr-1) != 0 {
 			Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
 		}
-		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot (BitsPointer)
-		*xoffset += t.Width
+		bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
 
 	case TARRAY:
+		elt := t.Elem()
+		if elt.Width == 0 {
+			// Short-circuit for #20739.
+			break
+		}
 		for i := int64(0); i < t.NumElem(); i++ {
-			onebitwalktype1(t.Elem(), xoffset, bv)
+			onebitwalktype1(elt, off, bv)
+			off += elt.Width
 		}
 
 	case TSTRUCT:
-		var o int64
-		for _, t1 := range t.Fields().Slice() {
-			fieldoffset := t1.Offset
-			*xoffset += fieldoffset - o
-			onebitwalktype1(t1.Type, xoffset, bv)
-			o = fieldoffset + t1.Type.Width
+		for _, f := range t.Fields().Slice() {
+			onebitwalktype1(f.Type, off+f.Offset, bv)
 		}
 
-		*xoffset += t.Width - o
-
 	default:
 		Fatalf("onebitwalktype1: unexpected type, %v", t)
 	}
@@ -453,8 +431,6 @@ func argswords(lv *Liveness) int32 {
 // this argument and the in arguments are always assumed live. The vars
 // argument is a slice of *Nodes.
 func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, locals bvec) {
-	var xoffset int64
-
 	for i := int32(0); ; i++ {
 		i = liveout.Next(i)
 		if i < 0 {
@@ -463,12 +439,10 @@ func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, l
 		node := vars[i]
 		switch node.Class() {
 		case PAUTO:
-			xoffset = node.Xoffset + lv.stkptrsize
-			onebitwalktype1(node.Type, &xoffset, locals)
+			onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
 
 		case PPARAM, PPARAMOUT:
-			xoffset = node.Xoffset
-			onebitwalktype1(node.Type, &xoffset, args)
+			onebitwalktype1(node.Type, node.Xoffset, args)
 		}
 	}
 }
@@ -1774,8 +1774,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
 	}
 
 	vec := bvalloc(8 * int32(len(ptrmask)))
-	xoffset := int64(0)
-	onebitwalktype1(t, &xoffset, vec)
+	onebitwalktype1(t, 0, vec)
 
 	nptr := typeptrdata(t) / int64(Widthptr)
 	for i := int64(0); i < nptr; i++ {
@@ -0,0 +1,16 @@
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F() {
+	var x struct {
+		x *int
+		w [1e9][1e9][1e9][0]*int
+		y *int
+	}
+	println(&x)
+}
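A note on the new test: the field w has type [1e9][1e9][1e9][0]*int, which is only representable because every level has zero width. Walking it element by element would recurse 1e9 times per outer dimension, on the order of 1e9 * 1e9 * 1e9 = 1e27 calls, even though the array contributes no pointer words; the elt.Width == 0 short-circuit in the TARRAY case skips that walk, so the liveness bitmap for x presumably only needs bits for the x and y pointer fields.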