all: single space after period.

The tree's pretty inconsistent about single space vs double space
after a period in documentation. Make it consistently a single space,
per earlier decisions. This means contributors won't be confused by
misleading precedence.

This CL doesn't use go/doc to parse. It only addresses // comments.
It was generated with:

$ perl -i -npe 's,^(\s*// .+[a-z]\.)  +([A-Z]),$1 $2,' $(git grep -l -E '^\s*//(.+\.)  +([A-Z])')
$ go test go/doc -update

Change-Id: Iccdb99c37c797ef1f804a94b22ba5ee4b500c4f7
Reviewed-on: https://go-review.googlesource.com/20022
Reviewed-by: Rob Pike <r@golang.org>
Reviewed-by: Dave Day <djd@golang.org>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
This commit is contained in:
Brad Fitzpatrick 2016-03-01 23:21:55 +00:00
parent 8b4deb448e
commit 5fea2ccc77
536 changed files with 1732 additions and 1732 deletions

View File

@ -316,8 +316,8 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)

View File

@ -234,7 +234,7 @@ func (b *Reader) ReadByte() (byte, error) {
return c, nil
}
// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
func (b *Reader) UnreadByte() error {
if b.lastByte < 0 || b.r == 0 && b.w > 0 {
return ErrInvalidUnreadByte
@ -273,7 +273,7 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
return r, size, nil
}
// UnreadRune unreads the last rune. If the most recent read operation on
// the buffer was not a ReadRune, UnreadRune returns an error. (In this
// regard it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)

View File

@ -44,7 +44,7 @@ var ErrTooLarge = errors.New("bytes.Buffer: too large")
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
@ -145,7 +145,7 @@ func (b *Buffer) WriteString(s string) (n int, err error) {
}
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
@ -252,7 +252,7 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
@ -347,7 +347,7 @@ func (b *Buffer) UnreadRune() error {
}
// UnreadByte unreads the last byte returned by the most recent
// read operation. If write has happened since the last read, UnreadByte
// returns an error.
func (b *Buffer) UnreadByte() error {
if b.lastRead != opReadRune && b.lastRead != opRead {
@ -400,7 +400,7 @@ func (b *Buffer) ReadString(delim byte) (line string, err error) {
}
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents. It is intended to prepare a Buffer to read existing data. It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//

View File

@ -164,7 +164,7 @@ func IndexRune(s []byte, r rune) int {
// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of any of the Unicode
// code points in chars. It returns -1 if chars is empty or if there is no code
// point in common.
func IndexAny(s []byte, chars string) int {
if len(chars) > 0 {
@ -188,8 +188,8 @@ func IndexAny(s []byte, chars string) int {
}
// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
// points. It returns the byte index of the last occurrence in s of any of
// the Unicode code points in chars. It returns -1 if chars is empty or if
// there is no code point in common.
func LastIndexAny(s []byte, chars string) int {
if len(chars) > 0 {
@ -276,7 +276,7 @@ func Fields(s []byte) [][]byte {
// FieldsFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned.
// FieldsFunc makes no guarantees about the order in which it calls f(c).
// If f does not return consistent results for a given c, FieldsFunc may crash.
@ -352,12 +352,12 @@ func HasSuffix(s, suffix []byte) bool {
// Map returns a copy of the byte slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the string with no replacement. The characters in s and the
// output are interpreted as UTF-8-encoded Unicode code points.
func Map(mapping func(r rune) rune, s []byte) []byte {
// In the worst case, the slice can grow when mapped, making
// things unpleasant. But it's so rare we barge in assuming it's
// fine. It could also shrink but that falls out naturally.
maxbytes := len(s) // length of b
nbytes := 0 // number of bytes encoded in b
b := make([]byte, maxbytes)
@ -697,7 +697,7 @@ func EqualFold(s, t []byte) bool {
return false
}
// General case. SimpleFold(x) returns the next equivalent rune > x
// or wraps around to smaller values.
r := unicode.SimpleFold(sr)
for r != sr && r < tr {
@ -709,6 +709,6 @@ func EqualFold(s, t []byte) bool {
return false
}
// One string is empty. Are both?
return len(s) == len(t)
}

View File

@ -113,7 +113,7 @@ func TestEqualExhaustive(t *testing.T) {
}
}
// make sure Equal returns false for minimally different strings. The data
// is all zeros except for a single one in one location.
func TestNotEqual(t *testing.T) {
var size = 128
@ -797,7 +797,7 @@ func TestMap(t *testing.T) {
// Run a couple of awful growth/shrinkage tests
a := tenRunes('a')
// 1. Grow. This triggers two reallocations in Map.
maxRune := func(r rune) rune { return unicode.MaxRune }
m := Map(maxRune, []byte(a))
expect := tenRunes(unicode.MaxRune)

View File

@ -62,7 +62,7 @@ func TestCompareBytes(t *testing.T) {
a := make([]byte, n+1)
b := make([]byte, n+1)
for len := 0; len < 128; len++ {
// randomish but deterministic data. No 0 or 255.
for i := 0; i < len; i++ {
a[i] = byte(1 + 31*i%254)
b[i] = byte(1 + 31*i%254)

View File

@ -14,11 +14,11 @@ import (
)
// This file tests the situation where memeq is checking
// data very near to a page boundary. We want to make sure
// equal does not read across the boundary and cause a page
// fault where it shouldn't.
// This test runs only on linux. The code being tested is
// not OS-specific, so it does not need to be tested on all
// operating systems.

View File

@ -40,7 +40,7 @@ func sourceLine(n ast.Node) int {
}
// ReadGo populates f with information learned from reading the
// Go source file with the given file name. It gathers the C preamble
// attached to the import "C" comment, a list of references to C.xxx,
// a list of exported functions, and the actual AST, to be rewritten and
// printed.

View File

@ -83,7 +83,7 @@ func (f *File) DiscardCgoDirectives() {
f.Preamble = strings.Join(linesOut, "\n")
}
// addToFlag appends args to flag. All flags are later written out onto the
// _cgo_flags file for the build system to use.
func (p *Package) addToFlag(flag string, args []string) {
p.CgoFlags[flag] = append(p.CgoFlags[flag], args...)
@ -99,7 +99,7 @@ func (p *Package) addToFlag(flag string, args []string) {
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
@ -236,7 +236,7 @@ func (p *Package) guessKinds(f *File) []*Name {
if isConst {
n.Kind = "const"
// Turn decimal into hex, just for consistency
// with enum-derived constants. Otherwise
// in the cgo -godefs output half the constants
// are in hex and half are in whatever the #define used.
i, err := strconv.ParseInt(n.Define, 0, 64)
@ -385,7 +385,7 @@ func (p *Package) guessKinds(f *File) []*Name {
if nerrors > 0 {
// Check if compiling the preamble by itself causes any errors,
// because the messages we've printed out so far aren't helpful
// to users debugging preamble mistakes. See issue 8442.
preambleErrors := p.gccErrors([]byte(f.Preamble))
if len(preambleErrors) > 0 {
error_(token.NoPos, "\n%s errors for preamble:\n%s", p.gccBaseCmd()[0], preambleErrors)
@ -403,7 +403,7 @@ func (p *Package) guessKinds(f *File) []*Name {
// being referred to as C.xxx.
func (p *Package) loadDWARF(f *File, names []*Name) {
// Extract the types from the DWARF section of an object
// from a well-formed C program. Gcc only generates DWARF info
// for symbols in the object file, so it is not enough to print the
// preamble and hope the symbols we care about will be there.
// Instead, emit
@ -421,7 +421,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
}
// Apple's LLVM-based gcc does not include the enumeration
// names and values in its DWARF debug output. In case we're
// using such a gcc, create a data block initialized with the values.
// We can read them out of the object file.
fmt.Fprintf(&b, "long long __cgodebug_data[] = {\n")
@ -594,7 +594,7 @@ func (p *Package) rewriteCalls(f *File) {
}
}
// rewriteCall rewrites one call to add pointer checks. We replace
// each pointer argument x with _cgoCheckPointer(x).(T).
func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
for i, param := range name.FuncType.Params {
@ -642,13 +642,13 @@ func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
} else {
// In order for the type assertion to succeed,
// we need it to match the actual type of the
// argument. The only type we have is the
// type of the function parameter. We know
// that the argument type must be assignable
// to the function parameter type, or the code
// would not compile, but there is nothing
// requiring that the types be exactly the
// same. Add a type conversion to the
// argument so that the type assertion will
// succeed.
c.Args[0] = &ast.CallExpr{
@ -675,7 +675,7 @@ func (p *Package) needsPointerCheck(f *File, t ast.Expr) bool {
return p.hasPointer(f, t, true)
}
// hasPointer is used by needsPointerCheck. If top is true it returns
// whether t is or contains a pointer that might point to a pointer.
// If top is false it returns whether t is or contains a pointer.
// f may be nil.
@ -732,7 +732,7 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
if goTypes[t.Name] != nil {
return false
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
case *ast.SelectorExpr:
@ -750,7 +750,7 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
if name != nil && name.Kind == "type" && name.Type != nil && name.Type.Go != nil {
return p.hasPointer(f, name.Type.Go, top)
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
default:
@ -760,14 +760,14 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
}
// checkAddrArgs tries to add arguments to the call of
// _cgoCheckPointer when the argument is an address expression. We
// pass true to mean that the argument is an address operation of
// something other than a slice index, which means that it's only
// necessary to check the specific element pointed to, not the entire
// object. This is for &s.f, where f is a field in a struct. We can
// pass a slice or array, meaning that we should check the entire
// slice or array but need not check any other part of the object.
// This is for &s.a[i], where we need to check all of a. However, we
// only pass the slice or array if we can refer to it without side
// effects.
func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr {
@ -786,7 +786,7 @@ func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr
index, ok := u.X.(*ast.IndexExpr)
if !ok {
// This is the address of something that is not an
// index expression. We only need to examine the
// single value to which it points.
// TODO: what if true is shadowed?
return append(args, ast.NewIdent("true"))
@ -853,10 +853,10 @@ func (p *Package) isType(t ast.Expr) bool {
return false
}
// unsafeCheckPointerName is given the Go version of a C type. If the
// type uses unsafe.Pointer, we arrange to build a version of
// _cgoCheckPointer that returns that type. This avoids using a type
// assertion to unsafe.Pointer in our copy of user code. We return
// the name of the _cgoCheckPointer function we are going to build, or
// the empty string if the type does not use unsafe.Pointer.
func (p *Package) unsafeCheckPointerName(t ast.Expr) string {
@ -906,7 +906,7 @@ func (p *Package) unsafeCheckPointerNameIndex(i int) string {
// rewriteRef rewrites all the C.xxx references in f.AST to refer to the
// Go equivalents, now that we have figured out the meaning of all
// the xxx. In *godefs mode, rewriteRef replaces the names
// with full definitions instead of mangled names.
func (p *Package) rewriteRef(f *File) {
// Keep a list of all the functions, to remove the ones
@ -929,7 +929,7 @@ func (p *Package) rewriteRef(f *File) {
// Now that we have all the name types filled in,
// scan through the Refs to identify the ones that
// are trying to do a ,err call. Also check that
// functions are only used in calls.
for _, r := range f.Ref {
if r.Name.Kind == "const" && r.Name.Const == "" {
@ -987,7 +987,7 @@ func (p *Package) rewriteRef(f *File) {
f.Name[fpName] = name
}
r.Name = name
// Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr
// function is defined in out.go and simply returns its argument. See
// issue 7757.
expr = &ast.CallExpr{
@ -1155,7 +1155,7 @@ func (p *Package) gccDebug(stdin []byte) (*dwarf.Data, binary.ByteOrder, []byte)
for i := range f.Symtab.Syms {
s := &f.Symtab.Syms[i]
if isDebugData(s.Name) {
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
@ -1182,7 +1182,7 @@ func (p *Package) gccDebug(stdin []byte) (*dwarf.Data, binary.ByteOrder, []byte)
for i := range symtab {
s := &symtab[i]
if isDebugData(s.Name) {
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
@ -1235,7 +1235,7 @@ func (p *Package) gccDefines(stdin []byte) string {
}
// gccErrors runs gcc over the C program stdin and returns
// the errors that gcc prints. That is, this function expects
// gcc to fail.
func (p *Package) gccErrors(stdin []byte) string {
// TODO(rsc): require failure
@ -1375,7 +1375,7 @@ var dwarfToName = map[string]string{
const signedDelta = 64
// String returns the current type representation. Format arguments
// are assembled within this method so that any changes in mutable
// values are taken into account.
func (tr *TypeRepr) String() string {
@ -1815,7 +1815,7 @@ func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
}
case *dwarf.TypedefType:
// C has much more relaxed rules than Go for
// implicit type conversions. When the parameter
// is type T defined as *X, simulate a little of the
// laxness of C by making the argument *X instead of T.
if ptr, ok := base(dt.Type).(*dwarf.PtrType); ok {
@ -1831,7 +1831,7 @@ func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
}
// Remember the C spelling, in case the struct
// has __attribute__((unavailable)) on it. See issue 2888.
t.Typedef = dt.Name
}
}
@ -1846,7 +1846,7 @@ func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType {
for i, f := range dtype.ParamType {
// gcc's DWARF generator outputs a single DotDotDotType parameter for
// function pointers that specify no parameters (e.g. void
// (*__cgo_0)()). Treat this special case as void. This case is
// invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not
// legal).
if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 {
@ -1917,8 +1917,8 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
off := int64(0)
// Rename struct fields that happen to be named Go keywords into
// _{keyword}. Create a map from C ident -> Go ident. The Go ident will
// be mangled. Any existing identifier that already has the same name on
// the C-side will cause the Go-mangled version to be prefixed with _.
// (e.g. in a struct with fields '_type' and 'type', the latter would be
// rendered as '__type' in Go).
@ -1958,7 +1958,7 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
// In godefs mode, if this field is a C11
// anonymous union then treat the first field in the
// union as the field in the struct. This handles
// cases like the glibc <sys/resource.h> file; see
// issue 6677.
if *godefs {
@ -2082,7 +2082,7 @@ func godefsFields(fld []*ast.Field) {
}
// fieldPrefix returns the prefix that should be removed from all the
// field names when generating the C or Go code. For generated
// C, we leave the names as is (tv_sec, tv_usec), since that's what
// people are used to seeing in C. For generated Go code, such as
// package syscall's data structures, we drop a common prefix
@ -2092,7 +2092,7 @@ func fieldPrefix(fld []*ast.Field) string {
for _, f := range fld {
for _, n := range f.Names {
// Ignore field names that don't have the prefix we're
// looking for. It is common in C headers to have fields
// named, say, _pad in an otherwise prefixed header.
// If the struct has 3 fields tv_sec, tv_usec, _pad1, then we
// still want to remove the tv_ prefix.

View File

@ -190,9 +190,9 @@ func main() {
if *dynobj != "" { if *dynobj != "" {
// cgo -dynimport is essentially a separate helper command // cgo -dynimport is essentially a separate helper command
// built into the cgo binary. It scans a gcc-produced executable // built into the cgo binary. It scans a gcc-produced executable
// and dumps information about the imported symbols and the // and dumps information about the imported symbols and the
// imported libraries. The 'go build' rules for cgo prepare an // imported libraries. The 'go build' rules for cgo prepare an
// appropriate executable and then use its import information // appropriate executable and then use its import information
// instead of needing to make the linkers duplicate all the // instead of needing to make the linkers duplicate all the
// specialized knowledge gcc has about where to look for imported // specialized knowledge gcc has about where to look for imported

View File

@ -55,7 +55,7 @@ func (p *Package) writeDefs() {
fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n") fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
} else { } else {
// If we're not importing runtime/cgo, we *are* runtime/cgo, // If we're not importing runtime/cgo, we *are* runtime/cgo,
// which provides these functions. We just need a prototype. // which provides these functions. We just need a prototype.
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int), void *a, int c);\n") fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int), void *a, int c);\n")
fmt.Fprintf(fm, "void _cgo_wait_runtime_init_done();\n") fmt.Fprintf(fm, "void _cgo_wait_runtime_init_done();\n")
} }
@ -592,7 +592,7 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
// the Go equivalents had good type params. // the Go equivalents had good type params.
// However, our version of the type omits the magic // However, our version of the type omits the magic
// words const and volatile, which can provoke // words const and volatile, which can provoke
// C compiler warnings. Silence them by casting // C compiler warnings. Silence them by casting
// all pointers to void*. (Eventually that will produce // all pointers to void*. (Eventually that will produce
// other warnings.) // other warnings.)
if c := t.C.String(); c[len(c)-1] == '*' { if c := t.C.String(); c[len(c)-1] == '*' {
@ -616,8 +616,8 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
fmt.Fprintf(fgcc, "\n") fmt.Fprintf(fgcc, "\n")
} }
// Write out a wrapper for a function when using gccgo. This is a // Write out a wrapper for a function when using gccgo. This is a
// simple wrapper that just calls the real function. We only need a // simple wrapper that just calls the real function. We only need a
// wrapper to support static functions in the prologue--without a // wrapper to support static functions in the prologue--without a
// wrapper, we can't refer to the function, since the reference is in // wrapper, we can't refer to the function, since the reference is in
// a different file. // a different file.
@ -707,7 +707,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fn := exp.Func fn := exp.Func
// Construct a gcc struct matching the gc argument and // Construct a gcc struct matching the gc argument and
// result frame. The gcc struct will be compiled with // result frame. The gcc struct will be compiled with
// __attribute__((packed)) so all padding must be accounted // __attribute__((packed)) so all padding must be accounted
// for explicitly. // for explicitly.
ctype := "struct {\n" ctype := "struct {\n"

View File

@ -84,7 +84,7 @@ func lineno(pos token.Pos) string {
// Die with an error message. // Die with an error message.
func fatalf(msg string, args ...interface{}) { func fatalf(msg string, args ...interface{}) {
// If we've already printed other errors, they might have // If we've already printed other errors, they might have
// caused the fatal condition. Assume they're enough. // caused the fatal condition. Assume they're enough.
if nerrors == 0 { if nerrors == 0 {
fmt.Fprintf(os.Stderr, msg+"\n", args...) fmt.Fprintf(os.Stderr, msg+"\n", args...)
} }

View File

@ -252,14 +252,14 @@ loop1:
// MOVLQZX removal. // MOVLQZX removal.
// The MOVLQZX exists to avoid being confused for a // The MOVLQZX exists to avoid being confused for a
// MOVL that is just copying 32-bit data around during // MOVL that is just copying 32-bit data around during
// copyprop. Now that copyprop is done, remov MOVLQZX R1, R2 // copyprop. Now that copyprop is done, remov MOVLQZX R1, R2
// if it is dominated by an earlier ADDL/MOVL/etc into R1 that // if it is dominated by an earlier ADDL/MOVL/etc into R1 that
// will have already cleared the high bits. // will have already cleared the high bits.
// //
// MOVSD removal. // MOVSD removal.
// We never use packed registers, so a MOVSD between registers // We never use packed registers, so a MOVSD between registers
// can be replaced by MOVAPD, which moves the pair of float64s // can be replaced by MOVAPD, which moves the pair of float64s
// instead of just the lower one. We only use the lower one, but // instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both. // the processor can do better if we do moves using both.
for r := (*gc.Flow)(g.Start); r != nil; r = r.Link { for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog p = r.Prog

View File

@ -126,7 +126,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
var ah gc.Node var ah gc.Node
gc.Regalloc(&ah, hi1.Type, nil) gc.Regalloc(&ah, hi1.Type, nil)
// Do op. Leave result in ah:al. // Do op. Leave result in ah:al.
switch n.Op { switch n.Op {
default: default:
gc.Fatalf("cgen64: not implemented: %v\n", n) gc.Fatalf("cgen64: not implemented: %v\n", n)

View File

@ -129,7 +129,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// TODO(austin): Instead of generating ADD $-8,R8; ADD // TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just // $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
// generate the offsets directly and eliminate the // generate the offsets directly and eliminate the
// ADDs. That will produce shorter, more // ADDs. That will produce shorter, more
// pipeline-able code. // pipeline-able code.
var p *obj.Prog var p *obj.Prog
for ; c > 0; c-- { for ; c > 0; c-- {

View File

@ -442,7 +442,7 @@ func benchmarkBitLenN(b *testing.B, nbits uint) {
} }
} }
// Individual bitLen tests. Numbers chosen to examine both sides // Individual bitLen tests. Numbers chosen to examine both sides
// of powers-of-two boundaries. // of powers-of-two boundaries.
func BenchmarkBitLen0(b *testing.B) { benchmarkBitLenN(b, 0) } func BenchmarkBitLen0(b *testing.B) { benchmarkBitLenN(b, 0) }
func BenchmarkBitLen1(b *testing.B) { benchmarkBitLenN(b, 1) } func BenchmarkBitLen1(b *testing.B) { benchmarkBitLenN(b, 1) }

View File

@ -647,7 +647,7 @@ func trailingZeroBits(x Word) uint {
// x & -x leaves only the right-most bit set in the word. Let k be the // x & -x leaves only the right-most bit set in the word. Let k be the
// index of that bit. Since only a single bit is set, the value is two // index of that bit. Since only a single bit is set, the value is two
// to the power of k. Multiplying by a power of two is equivalent to // to the power of k. Multiplying by a power of two is equivalent to
// left shifting, in this case by k bits. The de Bruijn constant is // left shifting, in this case by k bits. The de Bruijn constant is
// such that all six bit, consecutive substrings are distinct. // such that all six bit, consecutive substrings are distinct.
// Therefore, if we have a left shifted version of this constant we can // Therefore, if we have a left shifted version of this constant we can
// find by how many bits it was shifted by looking at which six bit // find by how many bits it was shifted by looking at which six bit
@ -1018,7 +1018,7 @@ func (z nat) expNNWindowed(x, y, m nat) nat {
for j := 0; j < _W; j += n { for j := 0; j < _W; j += n {
if i != len(y)-1 || j != 0 { if i != len(y)-1 || j != 0 {
// Unrolled loop for significant performance // Unrolled loop for significant performance
// gain. Use go test -bench=".*" in crypto/rsa // gain. Use go test -bench=".*" in crypto/rsa
// to check performance before making changes. // to check performance before making changes.
zz = zz.mul(z, z) zz = zz.mul(z, z)
zz, z = z, zz zz, z = z, zz

View File

@ -63,7 +63,7 @@ func (z *Rat) SetFloat64(f float64) *Rat {
// quotToFloat32 returns the non-negative float32 value // quotToFloat32 returns the non-negative float32 value
// nearest to the quotient a/b, using round-to-even in // nearest to the quotient a/b, using round-to-even in
// halfway cases. It does not mutate its arguments. // halfway cases. It does not mutate its arguments.
// Preconditions: b is non-zero; a and b have no common factors. // Preconditions: b is non-zero; a and b have no common factors.
func quotToFloat32(a, b nat) (f float32, exact bool) { func quotToFloat32(a, b nat) (f float32, exact bool) {
const ( const (
@ -161,7 +161,7 @@ func quotToFloat32(a, b nat) (f float32, exact bool) {
// quotToFloat64 returns the non-negative float64 value // quotToFloat64 returns the non-negative float64 value
// nearest to the quotient a/b, using round-to-even in // nearest to the quotient a/b, using round-to-even in
// halfway cases. It does not mutate its arguments. // halfway cases. It does not mutate its arguments.
// Preconditions: b is non-zero; a and b have no common factors. // Preconditions: b is non-zero; a and b have no common factors.
func quotToFloat64(a, b nat) (f float64, exact bool) { func quotToFloat64(a, b nat) (f float64, exact bool) {
const ( const (

View File

@ -137,7 +137,7 @@ func TestFloatString(t *testing.T) {
} }
} }
// Test inputs to Rat.SetString. The prefix "long:" causes the test // Test inputs to Rat.SetString. The prefix "long:" causes the test
// to be skipped in --test.short mode. (The threshold is about 500us.) // to be skipped in --test.short mode. (The threshold is about 500us.)
var float64inputs = []string{ var float64inputs = []string{
// Constants plundered from strconv/testfp.txt. // Constants plundered from strconv/testfp.txt.

View File

@ -406,7 +406,7 @@ func geneq(sym *Sym, t *Type) {
// An array of pure memory would be handled by the // An array of pure memory would be handled by the
// standard memequal, so the element type must not be // standard memequal, so the element type must not be
// pure memory. Even if we unrolled the range loop, // pure memory. Even if we unrolled the range loop,
// each iteration would be a function call, so don't bother // each iteration would be a function call, so don't bother
// unrolling. // unrolling.
nrange := Nod(ORANGE, nil, Nod(OIND, np, nil)) nrange := Nod(ORANGE, nil, Nod(OIND, np, nil))

View File

@ -86,9 +86,9 @@ func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
} }
// For nonzero-sized structs which end in a zero-sized thing, we add // For nonzero-sized structs which end in a zero-sized thing, we add
// an extra byte of padding to the type. This padding ensures that // an extra byte of padding to the type. This padding ensures that
// taking the address of the zero-sized thing can't manufacture a // taking the address of the zero-sized thing can't manufacture a
// pointer to the next object in the heap. See issue 9401. // pointer to the next object in the heap. See issue 9401.
if flag == 1 && o > starto && o == lastzero { if flag == 1 && o > starto && o == lastzero {
o++ o++
} }

View File

@ -248,7 +248,7 @@ func (p *importer) typ() *Type {
// (comment from go.y) // (comment from go.y)
// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
// out by typecheck's lookdot as this $$.ttype. So by providing // out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there. // this back link here we avoid special casing there.
n.Type.Nname = n n.Type.Nname = n

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// NOTE: If you change this file you must run "go generate" // NOTE: If you change this file you must run "go generate"
// to update builtin.go. This is not done automatically // to update builtin.go. This is not done automatically
// to avoid depending on having a working compiler binary. // to avoid depending on having a working compiler binary.
// +build ignore // +build ignore

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// NOTE: If you change this file you must run "go generate" // NOTE: If you change this file you must run "go generate"
// to update builtin.go. This is not done automatically // to update builtin.go. This is not done automatically
// to avoid depending on having a working compiler binary. // to avoid depending on having a working compiler binary.
// +build ignore // +build ignore

View File

@ -2296,7 +2296,7 @@ func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) || wb && osrc != -1000 { if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) || wb && osrc != -1000 {
// osrc and odst both on stack, and at least one is in // osrc and odst both on stack, and at least one is in
// an unknown position. Could generate code to test // an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy // for forward/backward copy, but instead just copy
// to a temporary location first. // to a temporary location first.
// //

View File

@ -167,7 +167,7 @@ func declare(n *Node, ctxt Class) {
n.Lineno = int32(parserline()) n.Lineno = int32(parserline())
s := n.Sym s := n.Sym
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
if importpkg == nil && !typecheckok && s.Pkg != localpkg { if importpkg == nil && !typecheckok && s.Pkg != localpkg {
Yyerror("cannot declare name %v", s) Yyerror("cannot declare name %v", s)
} }
@ -1021,7 +1021,7 @@ func embedded(s *Sym, pkg *Pkg) *Node {
CenterDot = 0xB7 CenterDot = 0xB7
) )
// Names sometimes have disambiguation junk // Names sometimes have disambiguation junk
// appended after a center dot. Discard it when // appended after a center dot. Discard it when
// making the name for the embedded struct field. // making the name for the embedded struct field.
name := s.Name name := s.Name

View File

@ -15,7 +15,7 @@ import (
// or single non-recursive functions, bottom up. // or single non-recursive functions, bottom up.
// //
// Finding these sets is finding strongly connected components // Finding these sets is finding strongly connected components
// in the static call graph. The algorithm for doing that is taken // in the static call graph. The algorithm for doing that is taken
// from Sedgewick, Algorithms, Second Edition, p. 482, with two // from Sedgewick, Algorithms, Second Edition, p. 482, with two
// adaptations. // adaptations.
// //
@ -168,7 +168,7 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
// //
// First escfunc, esc and escassign recurse over the ast of each // First escfunc, esc and escassign recurse over the ast of each
// function to dig out flow(dst,src) edges between any // function to dig out flow(dst,src) edges between any
// pointer-containing nodes and store them in dst->escflowsrc. For // pointer-containing nodes and store them in dst->escflowsrc. For
// variables assigned to a variable in an outer scope or used as a // variables assigned to a variable in an outer scope or used as a
// return value, they store a flow(theSink, src) edge to a fake node // return value, they store a flow(theSink, src) edge to a fake node
// 'the Sink'. For variables referenced in closures, an edge // 'the Sink'. For variables referenced in closures, an edge
@ -180,7 +180,7 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
// parameters it can reach as leaking. // parameters it can reach as leaking.
// //
// If a value's address is taken but the address does not escape, // If a value's address is taken but the address does not escape,
// then the value can stay on the stack. If the value new(T) does // then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation. // not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals. // The same is true of slice literals.
// //
@ -340,7 +340,7 @@ func (e *EscState) track(n *Node) {
} }
// Escape constants are numbered in order of increasing "escapiness" // Escape constants are numbered in order of increasing "escapiness"
// to help make inferences be monotonic. With the exception of // to help make inferences be monotonic. With the exception of
// EscNever which is sticky, eX < eY means that eY is more exposed // EscNever which is sticky, eX < eY means that eY is more exposed
// than eX, and hence replaces it in a conservative analysis. // than eX, and hence replaces it in a conservative analysis.
const ( const (
@ -378,7 +378,7 @@ func escMax(e, etype uint16) uint16 {
} }
// For each input parameter to a function, the escapeReturnEncoding describes // For each input parameter to a function, the escapeReturnEncoding describes
// how the parameter may leak to the function's outputs. This is currently the // how the parameter may leak to the function's outputs. This is currently the
// "level" of the leak where level is 0 or larger (negative level means stored into // "level" of the leak where level is 0 or larger (negative level means stored into
// something whose address is returned -- but that implies stored into the heap, // something whose address is returned -- but that implies stored into the heap,
// hence EscHeap, which means that the details are not currently relevant. ) // hence EscHeap, which means that the details are not currently relevant. )
@ -524,7 +524,7 @@ func escfunc(e *EscState, func_ *Node) {
// Mark labels that have no backjumps to them as not increasing e->loopdepth. // Mark labels that have no backjumps to them as not increasing e->loopdepth.
// Walk hasn't generated (goto|label)->left->sym->label yet, so we'll cheat // Walk hasn't generated (goto|label)->left->sym->label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again. // and set it to one of the following two. Then in esc we'll clear it again.
var looping Label var looping Label
var nonlooping Label var nonlooping Label
@ -1099,7 +1099,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
// Might be pointer arithmetic, in which case // Might be pointer arithmetic, in which case
// the operands flow into the result. // the operands flow into the result.
// TODO(rsc): Decide what the story is here. This is unsettling. // TODO(rsc): Decide what the story is here. This is unsettling.
case OADD, case OADD,
OSUB, OSUB,
OOR, OOR,
@ -1128,7 +1128,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
// flow are 000, 001, 010, 011 and EEEE is computed Esc bits. // flow are 000, 001, 010, 011 and EEEE is computed Esc bits.
// Note width of xxx depends on value of constant // Note width of xxx depends on value of constant
// bitsPerOutputInTag -- expect 2 or 3, so in practice the // bitsPerOutputInTag -- expect 2 or 3, so in practice the
// tag cache array is 64 or 128 long. Some entries will // tag cache array is 64 or 128 long. Some entries will
// never be populated. // never be populated.
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string
@ -1290,7 +1290,7 @@ func (e *EscState) addDereference(n *Node) *Node {
if Istype(t, Tptr) { if Istype(t, Tptr) {
// This should model our own sloppy use of OIND to encode // This should model our own sloppy use of OIND to encode
// decreasing levels of indirection; i.e., "indirecting" an array // decreasing levels of indirection; i.e., "indirecting" an array
// might yield the type of an element. To be enhanced... // might yield the type of an element. To be enhanced...
t = t.Type t = t.Type
} }
ind.Type = t ind.Type = t
@ -1419,7 +1419,7 @@ func esccall(e *EscState, n *Node, up *Node) {
fmt.Printf("%v::esccall:: %v in recursive group\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort)) fmt.Printf("%v::esccall:: %v in recursive group\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort))
} }
// function in same mutually recursive group. Incorporate into flow graph. // function in same mutually recursive group. Incorporate into flow graph.
// print("esc local fn: %N\n", fn->ntype); // print("esc local fn: %N\n", fn->ntype);
if fn.Name.Defn.Esc == EscFuncUnknown || nE.Escretval != nil { if fn.Name.Defn.Esc == EscFuncUnknown || nE.Escretval != nil {
Fatalf("graph inconsistency") Fatalf("graph inconsistency")
@ -1469,7 +1469,7 @@ func esccall(e *EscState, n *Node, up *Node) {
return return
} }
// Imported or completely analyzed function. Use the escape tags. // Imported or completely analyzed function. Use the escape tags.
if nE.Escretval != nil { if nE.Escretval != nil {
Fatalf("esc already decorated call %v\n", Nconv(n, obj.FmtSign)) Fatalf("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
} }

View File

@ -69,7 +69,7 @@ var fmtbody bool
// E.g. for %S: %+S %#S %-S print an identifier properly qualified for debug/export/internal mode. // E.g. for %S: %+S %#S %-S print an identifier properly qualified for debug/export/internal mode.
// //
// The mode flags +, - and # are sticky, meaning they persist through // The mode flags +, - and # are sticky, meaning they persist through
// recursions of %N, %T and %S, but not the h and l flags. The u flag is // recursions of %N, %T and %S, but not the h and l flags. The u flag is
// sticky only on %T recursions and only used in %-/Sym mode. // sticky only on %T recursions and only used in %-/Sym mode.
// //
@ -796,7 +796,7 @@ func stmtfmt(n *Node) string {
// some statements allow for an init, but at most one, // some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck // but we may have an arbitrary number added, eg by typecheck
// and inlining. If it doesn't fit the syntax, emit an enclosing // and inlining. If it doesn't fit the syntax, emit an enclosing
// block starting with the init statements. // block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so // if we can just say "for" n->ninit; ... then do so

View File

@ -17,7 +17,7 @@ import (
) )
// Make sure "hello world" does not link in all the // Make sure "hello world" does not link in all the
// fmt.scanf routines. See issue 6853. // fmt.scanf routines. See issue 6853.
func TestScanfRemoval(t *testing.T) { func TestScanfRemoval(t *testing.T) {
testenv.MustHaveGoBuild(t) testenv.MustHaveGoBuild(t)
@ -64,7 +64,7 @@ func main() {
} }
} }
// Make sure -S prints assembly code. See issue 14515. // Make sure -S prints assembly code. See issue 14515.
func TestDashS(t *testing.T) { func TestDashS(t *testing.T) {
testenv.MustHaveGoBuild(t) testenv.MustHaveGoBuild(t)
@ -99,7 +99,7 @@ func main() {
patterns := []string{ patterns := []string{
// It is hard to look for actual instructions in an // It is hard to look for actual instructions in an
// arch-independent way. So we'll just look for // arch-independent way. So we'll just look for
// pseudo-ops that are arch-independent. // pseudo-ops that are arch-independent.
"\tTEXT\t", "\tTEXT\t",
"\tFUNCDATA\t", "\tFUNCDATA\t",

View File

@ -43,7 +43,7 @@ var inlretlabel *Node // target of the goto substituted in place of a return
var inlretvars *NodeList // temp out variables var inlretvars *NodeList // temp out variables
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods // Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type. // the ->sym can be re-used in the local package, so peel it off the receiver's type.
func fnpkg(fn *Node) *Pkg { func fnpkg(fn *Node) *Pkg {
if fn.Type.Thistuple != 0 { if fn.Type.Thistuple != 0 {
@ -63,7 +63,7 @@ func fnpkg(fn *Node) *Pkg {
return fn.Sym.Pkg return fn.Sym.Pkg
} }
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body. // because they're a copy of an already checked body.
func typecheckinl(fn *Node) { func typecheckinl(fn *Node) {
lno := int(setlineno(fn)) lno := int(setlineno(fn))
@ -300,7 +300,7 @@ func inlcopyslice(ll []*Node) []*Node {
} }
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point. // calls made to inlineable functions. This is the external entry point.
func inlcalls(fn *Node) { func inlcalls(fn *Node) {
savefn := Curfn savefn := Curfn
Curfn = fn Curfn = fn
@ -358,7 +358,7 @@ func inlnodeslice(l []*Node) {
} }
// inlnode recurses over the tree to find inlineable calls, which will // inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes // be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up will examine left, right, list, rlist, ninit, ntest, nincr, // back up will examine left, right, list, rlist, ninit, ntest, nincr,
// nbody and nelse and use one of the 4 inlconv/glue functions above // nbody and nelse and use one of the 4 inlconv/glue functions above
// to turn the OINLCALL into an expression, a statement, or patch it // to turn the OINLCALL into an expression, a statement, or patch it
@ -881,7 +881,7 @@ func inlvar(var_ *Node) *Node {
// This may no longer be necessary now that we run escape analysis // This may no longer be necessary now that we run escape analysis
// after wrapper generation, but for 1.5 this is conservatively left // after wrapper generation, but for 1.5 this is conservatively left
// unchanged. See bugs 11053 and 9537. // unchanged. See bugs 11053 and 9537.
if var_.Esc == EscHeap { if var_.Esc == EscHeap {
addrescapes(n) addrescapes(n)
} }

View File

@ -10,7 +10,7 @@ import (
) )
// Rewrite tree to use separate statements to enforce // Rewrite tree to use separate statements to enforce
// order of evaluation. Makes walk easier, because it // order of evaluation. Makes walk easier, because it
// can (after this runs) reorder at will within an expression. // can (after this runs) reorder at will within an expression.
// //
// Rewrite x op= y into x = x op y. // Rewrite x op= y into x = x op y.

View File

@ -2011,7 +2011,7 @@ func (p *parser) hidden_fndcl() *Node {
// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
// out by typecheck's lookdot as this $$.ttype. So by providing // out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there. // this back link here we avoid special casing there.
ss.Type.Nname = ss ss.Type.Nname = ss
return ss return ss

View File

@ -29,7 +29,7 @@ const (
// An ordinary basic block. // An ordinary basic block.
// //
// Instructions are threaded together in a doubly-linked list. To iterate in // Instructions are threaded together in a doubly-linked list. To iterate in
// program order follow the link pointer from the first node and stop after the // program order follow the link pointer from the first node and stop after the
// last node has been visited // last node has been visited
// //
@ -122,7 +122,7 @@ func addedge(from *BasicBlock, to *BasicBlock) {
} }
// Inserts prev before curr in the instruction // Inserts prev before curr in the instruction
// stream. Any control flow, such as branches or fall-throughs, that target the // stream. Any control flow, such as branches or fall-throughs, that target the
// existing instruction are adjusted to target the new instruction. // existing instruction are adjusted to target the new instruction.
func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) { func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
// There may be other instructions pointing at curr, // There may be other instructions pointing at curr,
@ -181,9 +181,9 @@ func printblock(bb *BasicBlock) {
} }
} }
// Iterates over a basic block applying a callback to each instruction. There // Iterates over a basic block applying a callback to each instruction. There
// are two criteria for termination. If the end of basic block is reached a // are two criteria for termination. If the end of basic block is reached a
// value of zero is returned. If the callback returns a non-zero value, the // value of zero is returned. If the callback returns a non-zero value, the
// iteration is stopped and the value of the callback is returned. // iteration is stopped and the value of the callback is returned.
func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool { func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) { for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
@ -244,7 +244,7 @@ func getvariables(fn *Node) []*Node {
return result return result
} }
// A pretty printer for control flow graphs. Takes an array of BasicBlock*s. // A pretty printer for control flow graphs. Takes an array of BasicBlock*s.
func printcfg(cfg []*BasicBlock) { func printcfg(cfg []*BasicBlock) {
for _, bb := range cfg { for _, bb := range cfg {
printblock(bb) printblock(bb)
@ -252,7 +252,7 @@ func printcfg(cfg []*BasicBlock) {
} }
// Assigns a reverse post order number to each connected basic block using the // Assigns a reverse post order number to each connected basic block using the
// standard algorithm. Unconnected blocks will not be affected. // standard algorithm. Unconnected blocks will not be affected.
func reversepostorder(root *BasicBlock, rpo *int32) { func reversepostorder(root *BasicBlock, rpo *int32) {
root.mark = VISITED root.mark = VISITED
for _, bb := range root.succ { for _, bb := range root.succ {
@ -272,7 +272,7 @@ func (x blockrpocmp) Len() int { return len(x) }
func (x blockrpocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x blockrpocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo } func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo }
// A pattern matcher for call instructions. Returns true when the instruction // A pattern matcher for call instructions. Returns true when the instruction
// is a call to a specific package qualified function name. // is a call to a specific package qualified function name.
func iscall(prog *obj.Prog, name *obj.LSym) bool { func iscall(prog *obj.Prog, name *obj.LSym) bool {
if prog == nil { if prog == nil {
@ -340,8 +340,8 @@ func isdeferreturn(prog *obj.Prog) bool {
} }
// Walk backwards from a runtime·selectgo call up to its immediately dominating // Walk backwards from a runtime·selectgo call up to its immediately dominating
// runtime·newselect call. Any successor nodes of communication clause nodes // runtime·newselect call. Any successor nodes of communication clause nodes
// are implicit successors of the runtime·selectgo call node. The goal of this // are implicit successors of the runtime·selectgo call node. The goal of this
// analysis is to add these missing edges to complete the control flow graph. // analysis is to add these missing edges to complete the control flow graph.
func addselectgosucc(selectgo *BasicBlock) { func addselectgosucc(selectgo *BasicBlock) {
var succ *BasicBlock var succ *BasicBlock
@ -379,7 +379,7 @@ func addselectgosucc(selectgo *BasicBlock) {
} }
} }
// The entry point for the missing selectgo control flow algorithm. Takes an // The entry point for the missing selectgo control flow algorithm. Takes an
// array of BasicBlock*s containing selectgo calls. // array of BasicBlock*s containing selectgo calls.
func fixselectgo(selectgo []*BasicBlock) { func fixselectgo(selectgo []*BasicBlock) {
for _, bb := range selectgo { for _, bb := range selectgo {
@ -387,15 +387,15 @@ func fixselectgo(selectgo []*BasicBlock) {
} }
} }
// Constructs a control flow graph from a sequence of instructions. This // Constructs a control flow graph from a sequence of instructions. This
// procedure is complicated by various sources of implicit control flow that are // procedure is complicated by various sources of implicit control flow that are
// not accounted for using the standard cfg construction algorithm. Returns an // not accounted for using the standard cfg construction algorithm. Returns an
// array of BasicBlock*s in control flow graph form (basic blocks ordered by // array of BasicBlock*s in control flow graph form (basic blocks ordered by
// their RPO number). // their RPO number).
func newcfg(firstp *obj.Prog) []*BasicBlock { func newcfg(firstp *obj.Prog) []*BasicBlock {
// Reset the opt field of each prog to nil. In the first and second // Reset the opt field of each prog to nil. In the first and second
// passes, instructions that are labels temporarily use the opt field to // passes, instructions that are labels temporarily use the opt field to
// point to their basic block. In the third pass, the opt field reset // point to their basic block. In the third pass, the opt field reset
// to point to the predecessor of an instruction in its basic block. // to point to the predecessor of an instruction in its basic block.
for p := firstp; p != nil; p = p.Link { for p := firstp; p != nil; p = p.Link {
p.Opt = nil p.Opt = nil
@ -436,7 +436,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
} }
// Loop through all basic blocks maximally growing the list of // Loop through all basic blocks maximally growing the list of
// contained instructions until a label is reached. Add edges // contained instructions until a label is reached. Add edges
// for branches and fall-through instructions. // for branches and fall-through instructions.
for _, bb := range cfg { for _, bb := range cfg {
for p := bb.last; p != nil && p.As != obj.AEND; p = p.Link { for p := bb.last; p != nil && p.As != obj.AEND; p = p.Link {
@ -448,7 +448,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
// Stop before an unreachable RET, to avoid creating // Stop before an unreachable RET, to avoid creating
// unreachable control flow nodes. // unreachable control flow nodes.
if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 { if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 {
// TODO: remove after SSA is done. SSA does not // TODO: remove after SSA is done. SSA does not
// generate any unreachable RET instructions. // generate any unreachable RET instructions.
break break
} }
@ -472,7 +472,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
} }
// Add back links so the instructions in a basic block can be traversed // Add back links so the instructions in a basic block can be traversed
// backward. This is the final state of the instruction opt field. // backward. This is the final state of the instruction opt field.
for _, bb := range cfg { for _, bb := range cfg {
p := bb.first p := bb.first
var prev *obj.Prog var prev *obj.Prog
@ -500,13 +500,13 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
rpo := int32(len(cfg)) rpo := int32(len(cfg))
reversepostorder(bb, &rpo) reversepostorder(bb, &rpo)
// Sort the basic blocks by their depth first number. The // Sort the basic blocks by their depth first number. The
// array is now a depth-first spanning tree with the first // array is now a depth-first spanning tree with the first
// node being the root. // node being the root.
sort.Sort(blockrpocmp(cfg)) sort.Sort(blockrpocmp(cfg))
// Unreachable control flow nodes are indicated by a -1 in the rpo // Unreachable control flow nodes are indicated by a -1 in the rpo
// field. If we see these nodes something must have gone wrong in an // field. If we see these nodes something must have gone wrong in an
// upstream compilation phase. // upstream compilation phase.
bb = cfg[0] bb = cfg[0]
if bb.rpo == -1 { if bb.rpo == -1 {
@ -536,7 +536,7 @@ func isfunny(n *Node) bool {
} }
// Computes the effects of an instruction on a set of // Computes the effects of an instruction on a set of
// variables. The vars argument is an array of Node*s. // variables. The vars argument is an array of Node*s.
// //
// The output vectors give bits for variables: // The output vectors give bits for variables:
// uevar - used by this instruction // uevar - used by this instruction
@ -555,8 +555,8 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarini
bvresetall(avarinit) bvresetall(avarinit)
if prog.As == obj.ARET { if prog.As == obj.ARET {
// Return instructions implicitly read all the arguments. For // Return instructions implicitly read all the arguments. For
// the sake of correctness, out arguments must be read. For the // the sake of correctness, out arguments must be read. For the
// sake of backtrace quality, we read in arguments as well. // sake of backtrace quality, we read in arguments as well.
// //
// A return instruction with a p->to is a tail return, which brings // A return instruction with a p->to is a tail return, which brings
@ -676,7 +676,7 @@ Next:
} }
// Constructs a new liveness structure used to hold the global state of the // Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is an array of BasicBlock*s and the // liveness computation. The cfg argument is an array of BasicBlock*s and the
// vars argument is an array of Node*s. // vars argument is an array of Node*s.
func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness { func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
result := new(Liveness) result := new(Liveness)
@ -721,7 +721,7 @@ func printeffects(p *obj.Prog, uevar Bvec, varkill Bvec, avarinit Bvec) {
fmt.Printf("\n") fmt.Printf("\n")
} }
// Pretty print a variable node. Uses Pascal like conventions for pointers and // Pretty print a variable node. Uses Pascal like conventions for pointers and
// addresses to avoid confusing the C like conventions used in the node variable // addresses to avoid confusing the C like conventions used in the node variable
// names. // names.
func printnode(node *Node) { func printnode(node *Node) {
@ -736,7 +736,7 @@ func printnode(node *Node) {
fmt.Printf(" %v%s%s", node, p, a) fmt.Printf(" %v%s%s", node, p, a)
} }
// Pretty print a list of variables. The vars argument is an array of Node*s. // Pretty print a list of variables. The vars argument is an array of Node*s.
func printvars(name string, bv Bvec, vars []*Node) { func printvars(name string, bv Bvec, vars []*Node) {
fmt.Printf("%s:", name) fmt.Printf("%s:", name)
for i, node := range vars { for i, node := range vars {
@ -850,10 +850,10 @@ func checkprog(fn *Node, p *obj.Prog) {
} }
} }
// Check instruction invariants. We assume that the nodes corresponding to the // Check instruction invariants. We assume that the nodes corresponding to the
// sources and destinations of memory operations will be declared in the // sources and destinations of memory operations will be declared in the
// function. This is not strictly true, as is the case for the so-called funny // function. This is not strictly true, as is the case for the so-called funny
// nodes and there are special cases to skip over that stuff. The analysis will // nodes and there are special cases to skip over that stuff. The analysis will
// fail if this invariant blindly changes. // fail if this invariant blindly changes.
func checkptxt(fn *Node, firstp *obj.Prog) { func checkptxt(fn *Node, firstp *obj.Prog) {
if debuglive == 0 { if debuglive == 0 {
@ -931,7 +931,7 @@ func onebitwalktype1(t *Type, xoffset *int64, bv Bvec) {
case TARRAY: case TARRAY:
// The value of t->bound is -1 for slices types and >=0 for // The value of t->bound is -1 for slices types and >=0 for
// for fixed array types. All other values are invalid. // for fixed array types. All other values are invalid.
if t.Bound < -1 { if t.Bound < -1 {
Fatalf("onebitwalktype1: invalid bound, %v", t) Fatalf("onebitwalktype1: invalid bound, %v", t)
} }
@ -975,8 +975,8 @@ func argswords() int32 {
return int32(Curfn.Type.Argwid / int64(Widthptr)) return int32(Curfn.Type.Argwid / int64(Widthptr))
} }
// Generates live pointer value maps for arguments and local variables. The // Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars // this argument and the in arguments are always assumed live. The vars
// argument is an array of Node*s. // argument is an array of Node*s.
func onebitlivepointermap(lv *Liveness, liveout Bvec, vars []*Node, args Bvec, locals Bvec) { func onebitlivepointermap(lv *Liveness, liveout Bvec, vars []*Node, args Bvec, locals Bvec) {
var node *Node var node *Node
@ -1046,7 +1046,7 @@ func issafepoint(prog *obj.Prog) bool {
return prog.As == obj.ATEXT || prog.As == obj.ACALL return prog.As == obj.ATEXT || prog.As == obj.ACALL
} }
// Initializes the sets for solving the live variables. Visits all the // Initializes the sets for solving the live variables. Visits all the
// instructions in each basic block to summarizes the information at each basic // instructions in each basic block to summarizes the information at each basic
// block // block
func livenessprologue(lv *Liveness) { func livenessprologue(lv *Liveness) {
@ -1140,15 +1140,15 @@ func livenesssolve(lv *Liveness) {
} }
} }
// Iterate through the blocks in reverse round-robin fashion. A work // Iterate through the blocks in reverse round-robin fashion. A work
// queue might be slightly faster. As is, the number of iterations is // queue might be slightly faster. As is, the number of iterations is
// so low that it hardly seems to be worth the complexity. // so low that it hardly seems to be worth the complexity.
change = 1 change = 1
for change != 0 { for change != 0 {
change = 0 change = 0
// Walk blocks in the general direction of propagation. This // Walk blocks in the general direction of propagation. This
// improves convergence. // improves convergence.
for i := len(lv.cfg) - 1; i >= 0; i-- { for i := len(lv.cfg) - 1; i >= 0; i-- {
bb := lv.cfg[i] bb := lv.cfg[i]
@ -1714,10 +1714,10 @@ func livenessprintdebug(lv *Liveness) {
fmt.Printf("\n") fmt.Printf("\n")
} }
// Dumps an array of bitmaps to a symbol as a sequence of uint32 values. The // Dumps an array of bitmaps to a symbol as a sequence of uint32 values. The
// first word dumped is the total number of bitmaps. The second word is the // first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The // length of the bitmaps. All bitmaps are assumed to be of equal length. The
// words that are followed are the raw bitmap words. The arr argument is an // words that are followed are the raw bitmap words. The arr argument is an
// array of Node*s. // array of Node*s.
func onebitwritesymbol(arr []Bvec, sym *Sym) { func onebitwritesymbol(arr []Bvec, sym *Sym) {
var i int var i int
@ -1759,7 +1759,7 @@ func printprog(p *obj.Prog) {
} }
} }
// Entry pointer for liveness analysis. Constructs a complete CFG, solves for // Entry pointer for liveness analysis. Constructs a complete CFG, solves for
// the liveness of pointer variables in the function, and emits a runtime data // the liveness of pointer variables in the function, and emits a runtime data
// structure read by the garbage collector. // structure read by the garbage collector.
func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) { func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {

View File

@ -42,7 +42,7 @@ func siglt(a, b *Sig) bool {
} }
// Builds a type representing a Bucket structure for // Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users - // the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC // we include only enough information to generate a correct GC
// program for it. // program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go! // Make sure this stays in sync with ../../../../runtime/hashmap.go!
@ -421,7 +421,7 @@ func dimportpath(p *Pkg) {
} }
// If we are compiling the runtime package, there are two runtime packages around // If we are compiling the runtime package, there are two runtime packages around
// -- localpkg and Runtimepkg. We don't want to produce import path symbols for // -- localpkg and Runtimepkg. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg. // both of them, so just produce one for localpkg.
if myimportpath == "runtime" && p == Runtimepkg { if myimportpath == "runtime" && p == Runtimepkg {
return return

View File

@ -245,7 +245,7 @@ type state struct {
// *Node is the unique identifier (an ONAME Node) for the variable. // *Node is the unique identifier (an ONAME Node) for the variable.
vars map[*Node]*ssa.Value vars map[*Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID. // all defined variables at the end of each block. Indexed by block ID.
defvars []map[*Node]*ssa.Value defvars []map[*Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables. // addresses of PPARAM and PPARAMOUT variables.
@ -254,12 +254,12 @@ type state struct {
// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused. // symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
varsyms map[*Node]interface{} varsyms map[*Node]interface{}
// starting values. Memory, stack pointer, and globals pointer // starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value startmem *ssa.Value
sp *ssa.Value sp *ssa.Value
sb *ssa.Value sb *ssa.Value
// line number stack. The current line number is top of stack // line number stack. The current line number is top of stack
line []int32 line []int32
// list of panic calls by function name and line number. // list of panic calls by function name and line number.
@ -269,7 +269,7 @@ type state struct {
// list of FwdRef values. // list of FwdRef values.
fwdRefs []*ssa.Value fwdRefs []*ssa.Value
// list of PPARAMOUT (return) variables. Does not include PPARAM|PHEAP vars. // list of PPARAMOUT (return) variables. Does not include PPARAM|PHEAP vars.
returns []*Node returns []*Node
cgoUnsafeArgs bool cgoUnsafeArgs bool
@ -339,7 +339,7 @@ func (s *state) startBlock(b *ssa.Block) {
} }
// endBlock marks the end of generating code for the current block. // endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current // Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point. // block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block { func (s *state) endBlock() *ssa.Block {
b := s.curBlock b := s.curBlock
@ -540,7 +540,7 @@ func (s *state) stmt(n *Node) {
b.Kind = ssa.BlockExit b.Kind = ssa.BlockExit
b.Control = m b.Control = m
// TODO: never rewrite OPANIC to OCALLFUNC in the // TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends // first place. Need to wait until all backends
// go through SSA. // go through SSA.
} }
case ODEFER: case ODEFER:
@ -653,8 +653,8 @@ func (s *state) stmt(n *Node) {
rhs := n.Right rhs := n.Right
if rhs != nil && (rhs.Op == OSTRUCTLIT || rhs.Op == OARRAYLIT) { if rhs != nil && (rhs.Op == OSTRUCTLIT || rhs.Op == OARRAYLIT) {
// All literals with nonzero fields have already been // All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{} // rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value. // or equivalents. Use the zero value.
if !iszero(rhs) { if !iszero(rhs) {
Fatalf("literal with nonzero value in SSA: %v", rhs) Fatalf("literal with nonzero value in SSA: %v", rhs)
} }
@ -891,10 +891,10 @@ func (s *state) stmt(n *Node) {
} }
// exit processes any code that needs to be generated just before returning. // exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value // It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state. // will be set to the final memory state.
func (s *state) exit() *ssa.Block { func (s *state) exit() *ssa.Block {
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT // Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack. // variables back to the stack.
s.stmts(s.exitCode) s.stmts(s.exitCode)
@ -906,7 +906,7 @@ func (s *state) exit() *ssa.Block {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem()) s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem()) s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
// TODO: if val is ever spilled, we'd like to use the // TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen // PPARAMOUT slot for spilling it. That won't happen
// currently. // currently.
} }
@ -1382,7 +1382,7 @@ func (s *state) expr(n *Node) *ssa.Value {
case CTBOOL: case CTBOOL:
v := s.constBool(n.Val().U.(bool)) v := s.constBool(n.Val().U.(bool))
// For some reason the frontend gets the line numbers of // For some reason the frontend gets the line numbers of
// CTBOOL literals totally wrong. Fix it here by grabbing // CTBOOL literals totally wrong. Fix it here by grabbing
// the line number of the enclosing AST node. // the line number of the enclosing AST node.
if len(s.line) >= 2 { if len(s.line) >= 2 {
v.Line = s.line[len(s.line)-2] v.Line = s.line[len(s.line)-2]
@ -1925,7 +1925,7 @@ func (s *state) expr(n *Node) *ssa.Value {
tab := s.expr(n.Left) tab := s.expr(n.Left)
data := s.expr(n.Right) data := s.expr(n.Right)
// The frontend allows putting things like struct{*byte} in // The frontend allows putting things like struct{*byte} in
// the data portion of an eface. But we don't want struct{*byte} // the data portion of an eface. But we don't want struct{*byte}
// as a register type because (among other reasons) the liveness // as a register type because (among other reasons) the liveness
// analysis is confused by the "fat" variables that result from // analysis is confused by the "fat" variables that result from
// such types being spilled. // such types being spilled.
@ -2037,7 +2037,7 @@ func (s *state) expr(n *Node) *ssa.Value {
r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
s.vars[&ptrVar] = r[0] s.vars[&ptrVar] = r[0]
// Note: we don't need to read r[1], the result's length. It will be nl. // Note: we don't need to read r[1], the result's length. It will be nl.
// (or maybe we should, we just have to spill/restore nl otherwise?) // (or maybe we should, we just have to spill/restore nl otherwise?)
s.vars[&capVar] = r[2] s.vars[&capVar] = r[2]
b = s.endBlock() b = s.endBlock()
@ -2106,7 +2106,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
return return
// Note: if likely==1, then both recursive calls pass 1. // Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide // If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for // whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch. // the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for // TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info). // OANDAND and OOROR nodes (if it ever has such info).
@ -2191,7 +2191,7 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32)
s.addNamedValue(left, right) s.addNamedValue(left, right)
return return
} }
// Left is not ssa-able. Compute its address. // Left is not ssa-able. Compute its address.
addr := s.addr(left, false) addr := s.addr(left, false)
if left.Op == ONAME { if left.Op == ONAME {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
@ -2333,7 +2333,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
dowidth(fn.Type) dowidth(fn.Type)
stksize := fn.Type.Argwid // includes receiver stksize := fn.Type.Argwid // includes receiver
// Run all argument assignments. The arg slots have already // Run all argument assignments. The arg slots have already
// been offset by the appropriate amount (+2*widthptr for go/defer, // been offset by the appropriate amount (+2*widthptr for go/defer,
// +widthptr for interface calls). // +widthptr for interface calls).
// For OCALLMETH, the receiver is set in these statements. // For OCALLMETH, the receiver is set in these statements.
@ -2462,12 +2462,12 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
return nil return nil
case PAUTO: case PAUTO:
// We need to regenerate the address of autos // We need to regenerate the address of autos
// at every use. This prevents LEA instructions // at every use. This prevents LEA instructions
// from occurring before the corresponding VarDef // from occurring before the corresponding VarDef
// op and confusing the liveness analysis into thinking // op and confusing the liveness analysis into thinking
// the variable is live at function entry. // the variable is live at function entry.
// TODO: I'm not sure if this really works or we're just // TODO: I'm not sure if this really works or we're just
// getting lucky. We might need a real dependency edge // getting lucky. We might need a real dependency edge
// between vardef and addr ops. // between vardef and addr ops.
aux := &ssa.AutoSymbol{Typ: n.Type, Node: n} aux := &ssa.AutoSymbol{Typ: n.Type, Node: n}
return s.newValue1A(ssa.OpAddr, t, aux, s.sp) return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
@ -2599,7 +2599,7 @@ func (s *state) canSSA(n *Node) bool {
func canSSAType(t *Type) bool { func canSSAType(t *Type) bool {
dowidth(t) dowidth(t)
if t.Width > int64(4*Widthptr) { if t.Width > int64(4*Widthptr) {
// 4*Widthptr is an arbitrary constant. We want it // 4*Widthptr is an arbitrary constant. We want it
// to be at least 3*Widthptr so slices can be registerized. // to be at least 3*Widthptr so slices can be registerized.
// Too big and we'll introduce too much register pressure. // Too big and we'll introduce too much register pressure.
return false return false
@ -2647,7 +2647,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
s.startBlock(bNext) s.startBlock(bNext)
} }
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return. // Starts a new block on return.
func (s *state) boundsCheck(idx, len *ssa.Value) { func (s *state) boundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 { if Debug['B'] != 0 {
@ -2661,7 +2661,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value) {
s.check(cmp, Panicindex) s.check(cmp, Panicindex)
} }
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not. // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return. // Starts a new block on return.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 { if Debug['B'] != 0 {
@ -2701,7 +2701,7 @@ func (s *state) check(cmp *ssa.Value, fn *Node) {
// Returns a slice of results of the given result types. // Returns a slice of results of the given result types.
// The call is added to the end of the current block. // The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block. // If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block // If returns is true, the block is marked as a call block. A new block
// is started to load the return values. // is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value { func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack // Write args to the stack
@ -2773,7 +2773,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32) {
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym} aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb) flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now. // TODO: select the .enabled field. It is currently first, so not needed for now.
// Load word, test byte, avoiding partial register write from load byte. // Load word, test byte, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem()) flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag) flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
@ -2818,7 +2818,7 @@ func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32) {
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym} aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb) flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now. // TODO: select the .enabled field. It is currently first, so not needed for now.
// Load word, test byte, avoiding partial register write from load byte. // Load word, test byte, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem()) flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag) flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
@ -3018,7 +3018,7 @@ func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
var rcap *ssa.Value var rcap *ssa.Value
switch { switch {
case t.IsString(): case t.IsString():
// Capacity of the result is unimportant. However, we use // Capacity of the result is unimportant. However, we use
// rcap to test if we've generated a zero-length slice. // rcap to test if we've generated a zero-length slice.
// Use length of strings for that. // Use length of strings for that.
rcap = rlen rcap = rlen
@ -3123,13 +3123,13 @@ func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Ty
// Code borrowed from old code generator. // Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like // What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float // negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only // conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an // 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and // unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be // double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the // sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we // difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case // preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are // that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot // equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the // be lost else it would round down if the LSB of the
@ -3470,15 +3470,15 @@ func (s *state) mem() *ssa.Value {
} }
func (s *state) linkForwardReferences() { func (s *state) linkForwardReferences() {
// Build SSA graph. Each variable on its first use in a basic block // Build SSA graph. Each variable on its first use in a basic block
// leaves a FwdRef in that block representing the incoming value // leaves a FwdRef in that block representing the incoming value
// of that variable. This function links that ref up with possible definitions, // of that variable. This function links that ref up with possible definitions,
// inserting Phi values as needed. This is essentially the algorithm // inserting Phi values as needed. This is essentially the algorithm
// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau: // described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf // http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
// Differences: // Differences:
// - We use FwdRef nodes to postpone phi building until the CFG is // - We use FwdRef nodes to postpone phi building until the CFG is
// completely built. That way we can avoid the notion of "sealed" // completely built. That way we can avoid the notion of "sealed"
// blocks. // blocks.
// - Phi optimization is a separate pass (in ../ssa/phielim.go). // - Phi optimization is a separate pass (in ../ssa/phielim.go).
for len(s.fwdRefs) > 0 { for len(s.fwdRefs) > 0 {
@ -3501,7 +3501,7 @@ func (s *state) resolveFwdRef(v *ssa.Value) {
v.Aux = name v.Aux = name
return return
} }
// Not SSAable. Load it. // Not SSAable. Load it.
addr := s.decladdrs[name] addr := s.decladdrs[name]
if addr == nil { if addr == nil {
// TODO: closure args reach here. // TODO: closure args reach here.
@ -3527,7 +3527,7 @@ func (s *state) resolveFwdRef(v *ssa.Value) {
args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line)) args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
} }
// Decide if we need a phi or not. We need a phi if there // Decide if we need a phi or not. We need a phi if there
// are two different args (which are both not v). // are two different args (which are both not v).
var w *ssa.Value var w *ssa.Value
for _, a := range args { for _, a := range args {
@ -3548,7 +3548,7 @@ func (s *state) resolveFwdRef(v *ssa.Value) {
if w == nil { if w == nil {
s.Fatalf("no witness for reachable phi %s", v) s.Fatalf("no witness for reachable phi %s", v)
} }
// One witness. Make v a copy of w. // One witness. Make v a copy of w.
v.Op = ssa.OpCopy v.Op = ssa.OpCopy
v.AddArg(w) v.AddArg(w)
} }
@ -3560,7 +3560,7 @@ func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int
return v return v
} }
// The variable is not defined by b and we haven't // The variable is not defined by b and we haven't
// looked it up yet. Generate a FwdRef for the variable and return that. // looked it up yet. Generate a FwdRef for the variable and return that.
v := b.NewValue0A(line, ssa.OpFwdRef, t, name) v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
s.fwdRefs = append(s.fwdRefs, v) s.fwdRefs = append(s.fwdRefs, v)
m[name] = v m[name] = v
@ -3740,7 +3740,7 @@ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
gcsymdup(gcargs) gcsymdup(gcargs)
gcsymdup(gclocals) gcsymdup(gclocals)
// Add frame prologue. Zero ambiguously live variables. // Add frame prologue. Zero ambiguously live variables.
Thearch.Defframe(ptxt) Thearch.Defframe(ptxt)
if Debug['f'] != 0 { if Debug['f'] != 0 {
frame(0) frame(0)
@ -4115,7 +4115,7 @@ func (s *genState) genValue(v *ssa.Value) {
if v.AuxInt2Int64() == -1<<31 || x == r { if v.AuxInt2Int64() == -1<<31 || x == r {
if x != r { if x != r {
// This code compensates for the fact that the register allocator // This code compensates for the fact that the register allocator
// doesn't understand 2-address instructions yet. TODO: fix that. // doesn't understand 2-address instructions yet. TODO: fix that.
p := Prog(moveByType(v.Type)) p := Prog(moveByType(v.Type))
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = x p.From.Reg = x
@ -4183,7 +4183,7 @@ func (s *genState) genValue(v *ssa.Value) {
ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst,
ssa.OpAMD64ROLBconst: ssa.OpAMD64ROLBconst:
// This code compensates for the fact that the register allocator // This code compensates for the fact that the register allocator
// doesn't understand 2-address instructions yet. TODO: fix that. // doesn't understand 2-address instructions yet. TODO: fix that.
x := regnum(v.Args[0]) x := regnum(v.Args[0])
r := regnum(v) r := regnum(v)
if x != r { if x != r {
@ -4943,7 +4943,7 @@ func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
return v return v
} }
if size > s.config.IntSize { if size > s.config.IntSize {
// TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test // TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
// the high word and branch to out-of-bounds failure if it is not 0. // the high word and branch to out-of-bounds failure if it is not 0.
s.Unimplementedf("64->32 index truncation not implemented") s.Unimplementedf("64->32 index truncation not implemented")
return v return v
@ -5089,7 +5089,7 @@ func moveByType(t ssa.Type) int {
} }
// regnum returns the register (in cmd/internal/obj numbering) to // regnum returns the register (in cmd/internal/obj numbering) to
// which v has been allocated. Panics if v is not assigned to a // which v has been allocated. Panics if v is not assigned to a
// register. // register.
// TODO: Make this panic again once it stops happening routinely. // TODO: Make this panic again once it stops happening routinely.
func regnum(v *ssa.Value) int16 { func regnum(v *ssa.Value) int16 {

View File

@ -739,7 +739,7 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
} }
if t1.Sym != nil || t2.Sym != nil { if t1.Sym != nil || t2.Sym != nil {
// Special case: we keep byte and uint8 separate // Special case: we keep byte and uint8 separate
// for error messages. Treat them as equal. // for error messages. Treat them as equal.
switch t1.Etype { switch t1.Etype {
case TUINT8: case TUINT8:
if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) { if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
@ -997,7 +997,7 @@ func convertop(src *Type, dst *Type, why *string) Op {
} }
// The rules for interfaces are no different in conversions // The rules for interfaces are no different in conversions
// than assignments. If interfaces are involved, stop now // than assignments. If interfaces are involved, stop now
// with the good message from assignop. // with the good message from assignop.
// Otherwise clear the error. // Otherwise clear the error.
if src.Etype == TINTER || dst.Etype == TINTER { if src.Etype == TINTER || dst.Etype == TINTER {
@ -2684,8 +2684,8 @@ func ngotype(n *Node) *Sym {
} }
// Convert raw string to the prefix that will be used in the symbol // Convert raw string to the prefix that will be used in the symbol
// table. All control characters, space, '%' and '"', as well as // table. All control characters, space, '%' and '"', as well as
// non-7-bit clean bytes turn into %xx. The period needs escaping // non-7-bit clean bytes turn into %xx. The period needs escaping
// only in the last segment of the path, and it makes for happier // only in the last segment of the path, and it makes for happier
// users if we escape that as little as possible. // users if we escape that as little as possible.
// //

View File

@ -143,7 +143,7 @@ func (v V) val() int64 {
// address taken to force heap allocation, and then based on // address taken to force heap allocation, and then based on
// the value of which a pair of those locals are copied in // the value of which a pair of those locals are copied in
// various ways to the two results y, and z, which are also // various ways to the two results y, and z, which are also
// addressed. Which is expected to be one of 11-13, 21-23, 31, 32, // addressed. Which is expected to be one of 11-13, 21-23, 31, 32,
// and y.val() should be equal to which and y.p.val() should // and y.val() should be equal to which and y.p.val() should
// be equal to z.val(). Also, x(.p)**8 == x; that is, the // be equal to z.val(). Also, x(.p)**8 == x; that is, the
// autos are all linked into a ring. // autos are all linked into a ring.

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// This program generates a test to verify that the standard arithmetic // This program generates a test to verify that the standard arithmetic
// operators properly handle some special cases. The test file should be // operators properly handle some special cases. The test file should be
// generated with a known working version of go. // generated with a known working version of go.
// launch with `go run arithBoundaryGen.go` a file called arithBoundary_ssa.go // launch with `go run arithBoundaryGen.go` a file called arithBoundary_ssa.go
// will be written into the parent directory containing the tests // will be written into the parent directory containing the tests

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// This program generates a test to verify that the standard arithmetic // This program generates a test to verify that the standard arithmetic
// operators properly handle const cases. The test file should be // operators properly handle const cases. The test file should be
// generated with a known working version of go. // generated with a known working version of go.
// launch with `go run arithConstGen.go` a file called arithConst_ssa.go // launch with `go run arithConstGen.go` a file called arithConst_ssa.go
// will be written into the parent directory containing the tests // will be written into the parent directory containing the tests

View File

@ -42,7 +42,7 @@ func testStoreSize_ssa(p *uint16, q *uint16, v uint32) {
switch { switch {
} }
// Test to make sure that (Store ptr (Trunc32to16 val) mem) // Test to make sure that (Store ptr (Trunc32to16 val) mem)
// does not end up as a 32-bit store. It must stay a 16 bit store // does not end up as a 32-bit store. It must stay a 16 bit store
// even when Trunc32to16 is rewritten to be a nop. // even when Trunc32to16 is rewritten to be a nop.
// To ensure that we get rewrite the Trunc32to16 before // To ensure that we get rewrite the Trunc32to16 before
// we rewrite the Store, we force the truncate into an // we rewrite the Store, we force the truncate into an

View File

@ -85,7 +85,7 @@ func foo() int32 {
z = int32(data2[25]) z = int32(data2[25])
} }
// Lots of phis of the form phi(int32,int64) of type int32 happen here. // Lots of phis of the form phi(int32,int64) of type int32 happen here.
// Some will be stack phis. For those stack phis, make sure the spill // Some will be stack phis. For those stack phis, make sure the spill
// of the second argument uses the phi's width (4 bytes), not its width // of the second argument uses the phi's width (4 bytes), not its width
// (8 bytes). Otherwise, a random stack slot gets clobbered. // (8 bytes). Otherwise, a random stack slot gets clobbered.

View File

@ -30,7 +30,7 @@ func f_ssa() *[8]uint {
} else { } else {
x = 0 x = 0
} }
// Clobber the global pointer. The only live ref // Clobber the global pointer. The only live ref
// to the allocated object is now x. // to the allocated object is now x.
a = nil a = nil
@ -66,7 +66,7 @@ func g_ssa() *[7]uint {
} else { } else {
x = 0 x = 0
} }
// Clobber the global pointer. The only live ref // Clobber the global pointer. The only live ref
// to the allocated object is now x. // to the allocated object is now x.
a = nil a = nil

View File

@ -117,7 +117,7 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
if t.Sym != nil || x.Sym != nil { if t.Sym != nil || x.Sym != nil {
// Special case: we keep byte and uint8 separate // Special case: we keep byte and uint8 separate
// for error messages. Treat them as equal. // for error messages. Treat them as equal.
switch t.Etype { switch t.Etype {
case TUINT8: case TUINT8:
if (t == Types[TUINT8] || t == bytetype) && (x == Types[TUINT8] || x == bytetype) { if (t == Types[TUINT8] || t == bytetype) && (x == Types[TUINT8] || x == bytetype) {

View File

@ -2840,7 +2840,7 @@ func keydup(n *Node, hash map[uint32][]*Node) {
cmp.Right = a.Left cmp.Right = a.Left
evconst(&cmp) evconst(&cmp)
if cmp.Op == OLITERAL { if cmp.Op == OLITERAL {
// Sometimes evconst fails. See issue 12536. // Sometimes evconst fails. See issue 12536.
b = cmp.Val().U.(bool) b = cmp.Val().U.(bool)
} }
} }
@ -3074,7 +3074,7 @@ func typecheckcomplit(np **Node) {
Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t) Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
} }
// No pushtype allowed here. Must name fields for that. // No pushtype allowed here. Must name fields for that.
ll.N = assignconv(ll.N, f.Type, "field value") ll.N = assignconv(ll.N, f.Type, "field value")
ll.N = Nod(OKEY, newname(f.Sym), ll.N) ll.N = Nod(OKEY, newname(f.Sym), ll.N)
@ -3114,7 +3114,7 @@ func typecheckcomplit(np **Node) {
} }
// Sym might have resolved to name in other top-level // Sym might have resolved to name in other top-level
// package, because of import dot. Redirect to correct sym // package, because of import dot. Redirect to correct sym
// before we do the lookup. // before we do the lookup.
if s.Pkg != localpkg && exportname(s.Name) { if s.Pkg != localpkg && exportname(s.Name) {
s1 = Lookup(s.Name) s1 = Lookup(s.Name)
@ -3136,7 +3136,7 @@ func typecheckcomplit(np **Node) {
fielddup(newname(s), hash) fielddup(newname(s), hash)
r = l.Right r = l.Right
// No pushtype allowed here. Tried and rejected. // No pushtype allowed here. Tried and rejected.
typecheck(&r, Erv) typecheck(&r, Erv)
l.Right = assignconv(r, f.Type, "field value") l.Right = assignconv(r, f.Type, "field value")
@ -3504,7 +3504,7 @@ func domethod(n *Node) {
// } // }
// then even though I.M looks like it doesn't care about the // then even though I.M looks like it doesn't care about the
// value of its argument, a specific implementation of I may // value of its argument, a specific implementation of I may
// care. The _ would suppress the assignment to that argument // care. The _ would suppress the assignment to that argument
// while generating a call, so remove it. // while generating a call, so remove it.
for t := getinargx(nt.Type).Type; t != nil; t = t.Down { for t := getinargx(nt.Type).Type; t != nil; t = t.Down {
if t.Sym != nil && t.Sym.Name == "_" { if t.Sym != nil && t.Sym.Name == "_" {

View File

@ -2788,7 +2788,7 @@ func appendslice(n *Node, init **NodeList) *Node {
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're // and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly. // modifying here. Fix explicitly.
for l := n.List; l != nil; l = l.Next { for l := n.List; l != nil; l = l.Next {
l.N = cheapexpr(l.N, init) l.N = cheapexpr(l.N, init)
} }
@ -2907,7 +2907,7 @@ func walkappend(n *Node, init **NodeList, dst *Node) *Node {
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're // and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly. // modifying here. Fix explicitly.
// Using cheapexpr also makes sure that the evaluation // Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen // of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way. // before we begin to modify the slice in a visible way.
@ -3241,7 +3241,7 @@ func walkcompare(np **Node, init **NodeList) {
return return
} }
// Chose not to inline. Call equality function directly. // Chose not to inline. Call equality function directly.
var needsize int var needsize int
call := Nod(OCALL, eqfor(t, &needsize), nil) call := Nod(OCALL, eqfor(t, &needsize), nil)

View File

@ -129,7 +129,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// TODO: Instead of generating ADDV $-8,R8; ADDV // TODO: Instead of generating ADDV $-8,R8; ADDV
// $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7); // $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
// ADDV $8,R7;) just generate the offsets directly and // ADDV $8,R7;) just generate the offsets directly and
// eliminate the ADDs. That will produce shorter, more // eliminate the ADDs. That will produce shorter, more
// pipeline-able code. // pipeline-able code.
var p *obj.Prog var p *obj.Prog
for ; c > 0; c-- { for ; c > 0; c-- {

View File

@ -62,7 +62,7 @@ loop1:
// distinguish between moves that moves that *must* // distinguish between moves that moves that *must*
// sign/zero extend and moves that don't care so they // sign/zero extend and moves that don't care so they
// can eliminate moves that don't care without // can eliminate moves that don't care without
// breaking moves that do care. This might let us // breaking moves that do care. This might let us
// simplify or remove the next peep loop, too. // simplify or remove the next peep loop, too.
if p.As == mips.AMOVV || p.As == mips.AMOVF || p.As == mips.AMOVD { if p.As == mips.AMOVV || p.As == mips.AMOVF || p.As == mips.AMOVD {
if regtyp(&p.To) { if regtyp(&p.To) {
@ -697,7 +697,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// copyas returns 1 if a and v address the same register. // copyas returns 1 if a and v address the same register.
// //
// If a is the from operand, this means this operation reads the // If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means this operation // register in v. If a is the to operand, this means this operation
// writes the register in v. // writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) bool { func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) { if regtyp(v) {
@ -714,7 +714,7 @@ func copyas(a *obj.Addr, v *obj.Addr) bool {
// same register as v. // same register as v.
// //
// If a is the from operand, this means this operation reads the // If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means the operation // register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then // either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v). // the operation reads the register in v).
func copyau(a *obj.Addr, v *obj.Addr) bool { func copyau(a *obj.Addr, v *obj.Addr) bool {

View File

@ -123,7 +123,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// TODO(austin): Instead of generating ADD $-8,R8; ADD // TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just // $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
// generate the offsets directly and eliminate the // generate the offsets directly and eliminate the
// ADDs. That will produce shorter, more // ADDs. That will produce shorter, more
// pipeline-able code. // pipeline-able code.
var p *obj.Prog var p *obj.Prog
for ; c > 0; c-- { for ; c > 0; c-- {

View File

@ -42,9 +42,9 @@ var resvd = []int{
ppc64.REGZERO, ppc64.REGZERO,
ppc64.REGSP, // reserved for SP ppc64.REGSP, // reserved for SP
// We need to preserve the C ABI TLS pointer because sigtramp // We need to preserve the C ABI TLS pointer because sigtramp
// may happen during C code and needs to access the g. C // may happen during C code and needs to access the g. C
// clobbers REGG, so if Go were to clobber REGTLS, sigtramp // clobbers REGG, so if Go were to clobber REGTLS, sigtramp
// won't know which convention to use. By preserving REGTLS, // won't know which convention to use. By preserving REGTLS,
// we can just retrieve g from TLS when we aren't sure. // we can just retrieve g from TLS when we aren't sure.
ppc64.REGTLS, ppc64.REGTLS,

View File

@ -5,7 +5,7 @@
package ppc64 package ppc64
// Many Power ISA arithmetic and logical instructions come in four // Many Power ISA arithmetic and logical instructions come in four
// standard variants. These bits let us map between variants. // standard variants. These bits let us map between variants.
const ( const (
V_CC = 1 << 0 // xCC (affect CR field 0 flags) V_CC = 1 << 0 // xCC (affect CR field 0 flags)
V_V = 1 << 1 // xV (affect SO and OV flags) V_V = 1 << 1 // xV (affect SO and OV flags)

View File

@ -62,7 +62,7 @@ loop1:
// distinguish between moves that moves that *must* // distinguish between moves that moves that *must*
// sign/zero extend and moves that don't care so they // sign/zero extend and moves that don't care so they
// can eliminate moves that don't care without // can eliminate moves that don't care without
// breaking moves that do care. This might let us // breaking moves that do care. This might let us
// simplify or remove the next peep loop, too. // simplify or remove the next peep loop, too.
if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD { if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
if regtyp(&p.To) { if regtyp(&p.To) {
@ -962,7 +962,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// copyas returns 1 if a and v address the same register. // copyas returns 1 if a and v address the same register.
// //
// If a is the from operand, this means this operation reads the // If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means this operation // register in v. If a is the to operand, this means this operation
// writes the register in v. // writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) bool { func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) { if regtyp(v) {
@ -979,7 +979,7 @@ func copyas(a *obj.Addr, v *obj.Addr) bool {
// same register as v. // same register as v.
// //
// If a is the from operand, this means this operation reads the // If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means the operation // register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then // either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v). // the operation reads the register in v).
func copyau(a *obj.Addr, v *obj.Addr) bool { func copyau(a *obj.Addr, v *obj.Addr) bool {

View File

@ -180,8 +180,8 @@ func proginfo(p *obj.Prog) {
} }
} }
// Instruction variants table. Initially this contains entries only // Instruction variants table. Initially this contains entries only
// for the "base" form of each instruction. On the first call to // for the "base" form of each instruction. On the first call to
// as2variant or variant2as, we'll add the variants to the table. // as2variant or variant2as, we'll add the variants to the table.
var varianttable = [ppc64.ALAST][4]int{ var varianttable = [ppc64.ALAST][4]int{
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC}, ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},

View File

@ -8,29 +8,29 @@ import "fmt"
// Block represents a basic block in the control flow graph of a function. // Block represents a basic block in the control flow graph of a function.
type Block struct { type Block struct {
// A unique identifier for the block. The system will attempt to allocate // A unique identifier for the block. The system will attempt to allocate
// these IDs densely, but no guarantees. // these IDs densely, but no guarantees.
ID ID ID ID
// The kind of block this is. // The kind of block this is.
Kind BlockKind Kind BlockKind
// Subsequent blocks, if any. The number and order depend on the block kind. // Subsequent blocks, if any. The number and order depend on the block kind.
// All successors must be distinct (to make phi values in successors unambiguous). // All successors must be distinct (to make phi values in successors unambiguous).
Succs []*Block Succs []*Block
// Inverse of successors. // Inverse of successors.
// The order is significant to Phi nodes in the block. // The order is significant to Phi nodes in the block.
Preds []*Block Preds []*Block
// TODO: predecessors is a pain to maintain. Can we somehow order phi // TODO: predecessors is a pain to maintain. Can we somehow order phi
// arguments by block id and have this field computed explicitly when needed? // arguments by block id and have this field computed explicitly when needed?
// A value that determines how the block is exited. Its value depends on the kind // A value that determines how the block is exited. Its value depends on the kind
// of the block. For instance, a BlockIf has a boolean control value and BlockExit // of the block. For instance, a BlockIf has a boolean control value and BlockExit
// has a memory control value. // has a memory control value.
Control *Value Control *Value
// Auxiliary info for the block. Its value depends on the Kind. // Auxiliary info for the block. Its value depends on the Kind.
Aux interface{} Aux interface{}
// The unordered set of Values that define the operation of this block. // The unordered set of Values that define the operation of this block.
@ -97,7 +97,7 @@ func (b *Block) LongString() string {
return s return s
} }
// AddEdgeTo adds an edge from block b to block c. Used during building of the // AddEdgeTo adds an edge from block b to block c. Used during building of the
// SSA graph; do not use on an already-completed SSA graph. // SSA graph; do not use on an already-completed SSA graph.
func (b *Block) AddEdgeTo(c *Block) { func (b *Block) AddEdgeTo(c *Block) {
b.Succs = append(b.Succs, c) b.Succs = append(b.Succs, c)

View File

@ -33,7 +33,7 @@ func checkFunc(f *Func) {
// If the conditional is true, does v get the value of a or b? // If the conditional is true, does v get the value of a or b?
// We could solve this other ways, but the easiest is just to // We could solve this other ways, but the easiest is just to
// require (by possibly adding empty control-flow blocks) that // require (by possibly adding empty control-flow blocks) that
// all successors are distinct. They will need to be distinct // all successors are distinct. They will need to be distinct
// anyway for register allocation (duplicate successors implies // anyway for register allocation (duplicate successors implies
// the existence of critical edges). // the existence of critical edges).
// After regalloc we can allow non-distinct predecessors. // After regalloc we can allow non-distinct predecessors.

View File

@ -114,9 +114,9 @@ type pass struct {
// PhaseOption sets the specified flag in the specified ssa phase, // PhaseOption sets the specified flag in the specified ssa phase,
// returning empty string if this was successful or a string explaining // returning empty string if this was successful or a string explaining
// the error if it was not. A version of the phase name with "_" // the error if it was not. A version of the phase name with "_"
// replaced by " " is also checked for a match. // replaced by " " is also checked for a match.
// See gc/lex.go for dissection of the option string. Example use: // See gc/lex.go for dissection of the option string. Example use:
// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash ... // GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash ...
// //
func PhaseOption(phase, flag string, val int) string { func PhaseOption(phase, flag string, val int) string {
@ -189,7 +189,7 @@ var passes = [...]pass{
// Double-check phase ordering constraints. // Double-check phase ordering constraints.
// This code is intended to document the ordering requirements // This code is intended to document the ordering requirements
// between different phases. It does not override the passes // between different phases. It does not override the passes
// list above. // list above.
type constraint struct { type constraint struct {
a, b string // a must come before b a, b string // a must come before b

View File

@ -24,7 +24,7 @@ type Config struct {
optimize bool // Do optimization optimize bool // Do optimization
curFunc *Func curFunc *Func
// TODO: more stuff. Compiler flags of interest, ... // TODO: more stuff. Compiler flags of interest, ...
// Given an environment variable used for debug hash match, // Given an environment variable used for debug hash match,
// what file (if any) receives the yes/no logging? // what file (if any) receives the yes/no logging?
@ -95,7 +95,7 @@ type Frontend interface {
Line(int32) string Line(int32) string
} }
// interface used to hold *gc.Node. We'd use *gc.Node directly but // interface used to hold *gc.Node. We'd use *gc.Node directly but
// that would lead to an import cycle. // that would lead to an import cycle.
type GCNode interface { type GCNode interface {
Typ() Type Typ() Type

View File

@ -14,7 +14,7 @@ const (
) )
// cse does common-subexpression elimination on the Function. // cse does common-subexpression elimination on the Function.
// Values are just relinked, nothing is deleted. A subsequent deadcode // Values are just relinked, nothing is deleted. A subsequent deadcode
// pass is required to actually remove duplicate expressions. // pass is required to actually remove duplicate expressions.
func cse(f *Func) { func cse(f *Func) {
// Two values are equivalent if they satisfy the following definition: // Two values are equivalent if they satisfy the following definition:
@ -82,7 +82,7 @@ func cse(f *Func) {
} }
// Find an equivalence class where some members of the class have // Find an equivalence class where some members of the class have
// non-equivalent arguments. Split the equivalence class appropriately. // non-equivalent arguments. Split the equivalence class appropriately.
// Repeat until we can't find any more splits. // Repeat until we can't find any more splits.
for { for {
changed := false changed := false
@ -117,7 +117,7 @@ func cse(f *Func) {
changed = true changed = true
continue eqloop continue eqloop
} }
// v and w are equivalent. Keep w in e. // v and w are equivalent. Keep w in e.
j++ j++
} }
partition[i] = e partition[i] = e
@ -135,7 +135,7 @@ func cse(f *Func) {
idom := dominators(f) idom := dominators(f)
sdom := newSparseTree(f, idom) sdom := newSparseTree(f, idom)
// Compute substitutions we would like to do. We substitute v for w // Compute substitutions we would like to do. We substitute v for w
// if v and w are in the same equivalence class and v dominates w. // if v and w are in the same equivalence class and v dominates w.
rewrite := make([]*Value, f.NumValues()) rewrite := make([]*Value, f.NumValues())
for _, e := range partition { for _, e := range partition {
@ -191,7 +191,7 @@ func cse(f *Func) {
} }
} }
// An eqclass approximates an equivalence class. During the // An eqclass approximates an equivalence class. During the
// algorithm it may represent the union of several of the // algorithm it may represent the union of several of the
// final equivalence classes. // final equivalence classes.
type eqclass []*Value type eqclass []*Value
@ -207,7 +207,7 @@ type eqclass []*Value
// - first two arg's opcodes and auxint // - first two arg's opcodes and auxint
// - NOT first two arg's aux; that can break CSE. // - NOT first two arg's aux; that can break CSE.
// partitionValues returns a list of equivalence classes, each // partitionValues returns a list of equivalence classes, each
// being a sorted by ID list of *Values. The eqclass slices are // being a sorted by ID list of *Values. The eqclass slices are
// backed by the same storage as the input slice. // backed by the same storage as the input slice.
// Equivalence classes of size 1 are ignored. // Equivalence classes of size 1 are ignored.
func partitionValues(a []*Value, auxIDs auxmap) []eqclass { func partitionValues(a []*Value, auxIDs auxmap) []eqclass {

View File

@ -84,9 +84,9 @@ func liveValues(f *Func, reachable []bool) []bool {
// deadcode removes dead code from f. // deadcode removes dead code from f.
func deadcode(f *Func) { func deadcode(f *Func) {
// deadcode after regalloc is forbidden for now. Regalloc // deadcode after regalloc is forbidden for now. Regalloc
// doesn't quite generate legal SSA which will lead to some // doesn't quite generate legal SSA which will lead to some
// required moves being eliminated. See the comment at the // required moves being eliminated. See the comment at the
// top of regalloc.go for details. // top of regalloc.go for details.
if f.RegAlloc != nil { if f.RegAlloc != nil {
f.Fatalf("deadcode after regalloc") f.Fatalf("deadcode after regalloc")
@ -164,7 +164,7 @@ func deadcode(f *Func) {
} }
f.Names = f.Names[:i] f.Names = f.Names[:i]
// Remove dead values from blocks' value list. Return dead // Remove dead values from blocks' value list. Return dead
// values to the allocator. // values to the allocator.
for _, b := range f.Blocks { for _, b := range f.Blocks {
i := 0 i := 0
@ -184,7 +184,7 @@ func deadcode(f *Func) {
b.Values = b.Values[:i] b.Values = b.Values[:i]
} }
// Remove unreachable blocks. Return dead blocks to allocator. // Remove unreachable blocks. Return dead blocks to allocator.
i = 0 i = 0
for _, b := range f.Blocks { for _, b := range f.Blocks {
if reachable[b.ID] { if reachable[b.ID] {
@ -235,11 +235,11 @@ func (b *Block) removePred(p *Block) {
v.Args[n] = nil // aid GC v.Args[n] = nil // aid GC
v.Args = v.Args[:n] v.Args = v.Args[:n]
phielimValue(v) phielimValue(v)
// Note: this is trickier than it looks. Replacing // Note: this is trickier than it looks. Replacing
// a Phi with a Copy can in general cause problems because // a Phi with a Copy can in general cause problems because
// Phi and Copy don't have exactly the same semantics. // Phi and Copy don't have exactly the same semantics.
// Phi arguments always come from a predecessor block, // Phi arguments always come from a predecessor block,
// whereas copies don't. This matters in loops like: // whereas copies don't. This matters in loops like:
// 1: x = (Phi y) // 1: x = (Phi y)
// y = (Add x 1) // y = (Add x 1)
// goto 1 // goto 1
@ -253,15 +253,15 @@ func (b *Block) removePred(p *Block) {
// will barf on it. // will barf on it.
// //
// Fortunately, this situation can only happen for dead // Fortunately, this situation can only happen for dead
// code loops. We know the code we're working with is // code loops. We know the code we're working with is
// not dead, so we're ok. // not dead, so we're ok.
// Proof: If we have a potential bad cycle, we have a // Proof: If we have a potential bad cycle, we have a
// situation like this: // situation like this:
// x = (Phi z) // x = (Phi z)
// y = (op1 x ...) // y = (op1 x ...)
// z = (op2 y ...) // z = (op2 y ...)
// Where opX are not Phi ops. But such a situation // Where opX are not Phi ops. But such a situation
// implies a cycle in the dominator graph. In the // implies a cycle in the dominator graph. In the
// example, x.Block dominates y.Block, y.Block dominates // example, x.Block dominates y.Block, y.Block dominates
// z.Block, and z.Block dominates x.Block (treating // z.Block, and z.Block dominates x.Block (treating
// "dominates" as reflexive). Cycles in the dominator // "dominates" as reflexive). Cycles in the dominator

View File

@ -7,7 +7,7 @@ package ssa
// dse does dead-store elimination on the Function. // dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by // Dead stores are those which are unconditionally followed by
// another store to the same location, with no intervening load. // another store to the same location, with no intervening load.
// This implementation only works within a basic block. TODO: use something more global. // This implementation only works within a basic block. TODO: use something more global.
func dse(f *Func) { func dse(f *Func) {
var stores []*Value var stores []*Value
loadUse := f.newSparseSet(f.NumValues()) loadUse := f.newSparseSet(f.NumValues())
@ -17,7 +17,7 @@ func dse(f *Func) {
shadowed := f.newSparseSet(f.NumValues()) shadowed := f.newSparseSet(f.NumValues())
defer f.retSparseSet(shadowed) defer f.retSparseSet(shadowed)
for _, b := range f.Blocks { for _, b := range f.Blocks {
// Find all the stores in this block. Categorize their uses: // Find all the stores in this block. Categorize their uses:
// loadUse contains stores which are used by a subsequent load. // loadUse contains stores which are used by a subsequent load.
// storeUse contains stores which are used by a subsequent store. // storeUse contains stores which are used by a subsequent store.
loadUse.clear() loadUse.clear()
@ -67,9 +67,9 @@ func dse(f *Func) {
b.Fatalf("no last store found - cycle?") b.Fatalf("no last store found - cycle?")
} }
// Walk backwards looking for dead stores. Keep track of shadowed addresses. // Walk backwards looking for dead stores. Keep track of shadowed addresses.
// An "address" is an SSA Value which encodes both the address and size of // An "address" is an SSA Value which encodes both the address and size of
// the write. This code will not remove dead stores to the same address // the write. This code will not remove dead stores to the same address
// of different types. // of different types.
shadowed.clear() shadowed.clear()
v := last v := last

View File

@ -65,7 +65,7 @@ func TestDeadStorePhi(t *testing.T) {
} }
func TestDeadStoreTypes(t *testing.T) { func TestDeadStoreTypes(t *testing.T) {
// Make sure a narrow store can't shadow a wider one. We test an even // Make sure a narrow store can't shadow a wider one. We test an even
// stronger restriction, that one store can't shadow another unless the // stronger restriction, that one store can't shadow another unless the
// types of the address fields are identical (where identicalness is // types of the address fields are identical (where identicalness is
// decided by the CSE pass). // decided by the CSE pass).

View File

@ -16,7 +16,7 @@ const (
// of a control-flow graph. // of a control-flow graph.
// postorder computes a postorder traversal ordering for the // postorder computes a postorder traversal ordering for the
// basic blocks in f. Unreachable blocks will not appear. // basic blocks in f. Unreachable blocks will not appear.
func postorder(f *Func) []*Block { func postorder(f *Func) []*Block {
mark := make([]byte, f.NumBlocks()) mark := make([]byte, f.NumBlocks())
@ -31,12 +31,12 @@ func postorder(f *Func) []*Block {
b := s[len(s)-1] b := s[len(s)-1]
switch mark[b.ID] { switch mark[b.ID] {
case explored: case explored:
// Children have all been visited. Pop & output block. // Children have all been visited. Pop & output block.
s = s[:len(s)-1] s = s[:len(s)-1]
mark[b.ID] = done mark[b.ID] = done
order = append(order, b) order = append(order, b)
case notExplored: case notExplored:
// Children have not been visited yet. Mark as explored // Children have not been visited yet. Mark as explored
// and queue any children we haven't seen yet. // and queue any children we haven't seen yet.
mark[b.ID] = explored mark[b.ID] = explored
for _, c := range b.Succs { for _, c := range b.Succs {
@ -140,9 +140,9 @@ func (f *Func) dfs(entries []*Block, succFn linkedBlocks, dfnum, order, parent [
return return
} }
// dominators computes the dominator tree for f. It returns a slice // dominators computes the dominator tree for f. It returns a slice
// which maps block ID to the immediate dominator of that block. // which maps block ID to the immediate dominator of that block.
// Unreachable blocks map to nil. The entry block maps to nil. // Unreachable blocks map to nil. The entry block maps to nil.
func dominators(f *Func) []*Block { func dominators(f *Func) []*Block {
preds := func(b *Block) []*Block { return b.Preds } preds := func(b *Block) []*Block { return b.Preds }
succs := func(b *Block) []*Block { return b.Succs } succs := func(b *Block) []*Block { return b.Succs }
@ -298,9 +298,9 @@ func eval(v ID, ancestor []ID, semi []ID, dfnum []ID, best []ID) ID {
return best[v] return best[v]
} }
// dominators computes the dominator tree for f. It returns a slice // dominators computes the dominator tree for f. It returns a slice
// which maps block ID to the immediate dominator of that block. // which maps block ID to the immediate dominator of that block.
// Unreachable blocks map to nil. The entry block maps to nil. // Unreachable blocks map to nil. The entry block maps to nil.
func dominatorsSimple(f *Func) []*Block { func dominatorsSimple(f *Func) []*Block {
// A simple algorithm for now // A simple algorithm for now
// Cooper, Harvey, Kennedy // Cooper, Harvey, Kennedy

View File

@ -7,18 +7,18 @@ package ssa
const flagRegMask = regMask(1) << 33 // TODO: arch-specific const flagRegMask = regMask(1) << 33 // TODO: arch-specific
// flagalloc allocates the flag register among all the flag-generating // flagalloc allocates the flag register among all the flag-generating
// instructions. Flag values are recomputed if they need to be // instructions. Flag values are recomputed if they need to be
// spilled/restored. // spilled/restored.
func flagalloc(f *Func) { func flagalloc(f *Func) {
// Compute the in-register flag value we want at the end of // Compute the in-register flag value we want at the end of
// each block. This is basically a best-effort live variable // each block. This is basically a best-effort live variable
// analysis, so it can be much simpler than a full analysis. // analysis, so it can be much simpler than a full analysis.
// TODO: do we really need to keep flag values live across blocks? // TODO: do we really need to keep flag values live across blocks?
// Could we force the flags register to be unused at basic block // Could we force the flags register to be unused at basic block
// boundaries? Then we wouldn't need this computation. // boundaries? Then we wouldn't need this computation.
end := make([]*Value, f.NumBlocks()) end := make([]*Value, f.NumBlocks())
for n := 0; n < 2; n++ { for n := 0; n < 2; n++ {
// Walk blocks backwards. Poor-man's postorder traversal. // Walk blocks backwards. Poor-man's postorder traversal.
for i := len(f.Blocks) - 1; i >= 0; i-- { for i := len(f.Blocks) - 1; i >= 0; i-- {
b := f.Blocks[i] b := f.Blocks[i]
// Walk values backwards to figure out what flag // Walk values backwards to figure out what flag
@ -117,7 +117,7 @@ func flagalloc(f *Func) {
// subsequent blocks. // subsequent blocks.
_ = v.copyInto(b) _ = v.copyInto(b)
// Note: this flag generator is not properly linked up // Note: this flag generator is not properly linked up
// with the flag users. This breaks the SSA representation. // with the flag users. This breaks the SSA representation.
// We could fix up the users with another pass, but for now // We could fix up the users with another pass, but for now
// we'll just leave it. (Regalloc has the same issue for // we'll just leave it. (Regalloc has the same issue for
// standard regs, and it runs next.) // standard regs, and it runs next.)

View File

@ -10,7 +10,7 @@ import (
) )
// A Func represents a Go func declaration (or function literal) and // A Func represents a Go func declaration (or function literal) and
// its body. This package compiles each Func independently. // its body. This package compiles each Func independently.
type Func struct { type Func struct {
Config *Config // architecture information Config *Config // architecture information
pass *pass // current pass information (name, options, etc.) pass *pass // current pass information (name, options, etc.)
@ -29,7 +29,7 @@ type Func struct {
// map from LocalSlot to set of Values that we want to store in that slot. // map from LocalSlot to set of Values that we want to store in that slot.
NamedValues map[LocalSlot][]*Value NamedValues map[LocalSlot][]*Value
// Names is a copy of NamedValues.Keys. We keep a separate list // Names is a copy of NamedValues.Keys. We keep a separate list
// of keys to make iteration order deterministic. // of keys to make iteration order deterministic.
Names []LocalSlot Names []LocalSlot
@ -109,7 +109,7 @@ func (f *Func) logStat(key string, args ...interface{}) {
f.Config.Warnl(int(f.Entry.Line), "\t%s\t%s%s\t%s", f.pass.name, key, value, f.Name) f.Config.Warnl(int(f.Entry.Line), "\t%s\t%s%s\t%s", f.pass.name, key, value, f.Name)
} }
// freeValue frees a value. It must no longer be referenced. // freeValue frees a value. It must no longer be referenced.
func (f *Func) freeValue(v *Value) { func (f *Func) freeValue(v *Value) {
if v.Block == nil { if v.Block == nil {
f.Fatalf("trying to free an already freed value") f.Fatalf("trying to free an already freed value")
@ -177,7 +177,7 @@ func (b *Block) NewValue0I(line int32, op Op, t Type, auxint int64) *Value {
// NewValue returns a new value in the block with no arguments and an aux value. // NewValue returns a new value in the block with no arguments and an aux value.
func (b *Block) NewValue0A(line int32, op Op, t Type, aux interface{}) *Value { func (b *Block) NewValue0A(line int32, op Op, t Type, aux interface{}) *Value {
if _, ok := aux.(int64); ok { if _, ok := aux.(int64); ok {
// Disallow int64 aux values. They should be in the auxint field instead. // Disallow int64 aux values. They should be in the auxint field instead.
// Maybe we want to allow this at some point, but for now we disallow it // Maybe we want to allow this at some point, but for now we disallow it
// to prevent errors like using NewValue1A instead of NewValue1I. // to prevent errors like using NewValue1A instead of NewValue1I.
b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux) b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)

View File

@ -3,8 +3,8 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// x86 register conventions: // x86 register conventions:
// - Integer types live in the low portion of registers. Upper portions are junk. // - Integer types live in the low portion of registers. Upper portions are junk.
// - Boolean types use the low-order byte of a register. Upper bytes are junk. // - Boolean types use the low-order byte of a register. Upper bytes are junk.
// - We do not use AH,BH,CH,DH registers. // - We do not use AH,BH,CH,DH registers.
// - Floating-point types will live in the low natural slot of an sse2 register. // - Floating-point types will live in the low natural slot of an sse2 register.
// Unused portions are junk. // Unused portions are junk.
@ -335,7 +335,7 @@
// ADDQ $16, SI // ADDQ $16, SI
// MOVUPS X0, (DI) // MOVUPS X0, (DI)
// ADDQ $16, DI // ADDQ $16, DI
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy. // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
// Large copying uses REP MOVSQ. // Large copying uses REP MOVSQ.
(Move [size] dst src mem) && size > 16*64 && size%8 == 0 -> (Move [size] dst src mem) && size > 16*64 && size%8 == 0 ->
@ -529,7 +529,7 @@
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even // because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g. // for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHLW x (MOVWconst [24])), but just in case. // (SHLW x (MOVWconst [24])), but just in case.
(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c]) (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
@ -598,7 +598,7 @@
// sign extended loads // sign extended loads
// Note: The combined instruction must end up in the same block // Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with // as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to // memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously. // multiple memory values alive simultaneously.
(MOVBQSX (MOVBload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) (MOVBQSX (MOVBload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)

View File

@ -141,7 +141,7 @@ func init() {
// Suffixes encode the bit width of various instructions. // Suffixes encode the bit width of various instructions.
// Q = 64 bit, L = 32 bit, W = 16 bit, B = 8 bit // Q = 64 bit, L = 32 bit, W = 16 bit, B = 8 bit
// TODO: 2-address instructions. Mark ops as needing matching input/output regs. // TODO: 2-address instructions. Mark ops as needing matching input/output regs.
var AMD64ops = []opData{ var AMD64ops = []opData{
// fp ops // fp ops
{name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS"}, // fp32 add {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS"}, // fp32 add
@ -500,12 +500,12 @@ func init() {
// arg0=ptr/int arg1=mem, output=int/ptr // arg0=ptr/int arg1=mem, output=int/ptr
{name: "MOVQconvert", argLength: 2, reg: gp11nf, asm: "MOVQ"}, {name: "MOVQconvert", argLength: 2, reg: gp11nf, asm: "MOVQ"},
// Constant flag values. For any comparison, there are 5 possible // Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the // outcomes: the three from the signed total order (<,==,>) and the
// three from the unsigned total order. The == cases overlap. // three from the unsigned total order. The == cases overlap.
// Note: there's a sixth "unordered" outcome for floating-point // Note: there's a sixth "unordered" outcome for floating-point
// comparisons, but we don't use such a beast yet. // comparisons, but we don't use such a beast yet.
// These ops are for temporary use by rewrite rules. They // These ops are for temporary use by rewrite rules. They
// cannot appear in the generated assembly. // cannot appear in the generated assembly.
{name: "FlagEQ"}, // equal {name: "FlagEQ"}, // equal
{name: "FlagLT_ULT"}, // signed < and unsigned < {name: "FlagLT_ULT"}, // signed < and unsigned <

View File

@ -643,7 +643,7 @@
(Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)])) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
// strength reduction of divide by a constant. // strength reduction of divide by a constant.
// Note: frontend does <=32 bits. We only need to do 64 bits here. // Note: frontend does <=32 bits. We only need to do 64 bits here.
// TODO: Do them all here? // TODO: Do them all here?
// Div/mod by 1. Currently handled by frontend. // Div/mod by 1. Currently handled by frontend.

View File

@ -6,7 +6,7 @@ package main
var genericOps = []opData{ var genericOps = []opData{
// 2-input arithmetic // 2-input arithmetic
// Types must be consistent with Go typing. Add, for example, must take two values // Types must be consistent with Go typing. Add, for example, must take two values
// of the same type and produces that same type. // of the same type and produces that same type.
{name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1 {name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
{name: "Add16", argLength: 2, commutative: true}, {name: "Add16", argLength: 2, commutative: true},
@ -250,7 +250,7 @@ var genericOps = []opData{
// arg0=ptr/int arg1=mem, output=int/ptr // arg0=ptr/int arg1=mem, output=int/ptr
{name: "Convert", argLength: 2}, {name: "Convert", argLength: 2},
// constants. Constant values are stored in the aux or // constants. Constant values are stored in the aux or
// auxint fields. // auxint fields.
{name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true {name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
{name: "ConstString", aux: "String"}, // value is aux.(string) {name: "ConstString", aux: "String"}, // value is aux.(string)
@ -270,7 +270,7 @@ var genericOps = []opData{
// The address of a variable. arg0 is the base pointer (SB or SP, depending // The address of a variable. arg0 is the base pointer (SB or SP, depending
// on whether it is a global or stack variable). The Aux field identifies the // on whether it is a global or stack variable). The Aux field identifies the
// variable. It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP), // variable. It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP),
// or *AutoSymbol (arg0=SP). // or *AutoSymbol (arg0=SP).
{name: "Addr", argLength: 1, aux: "Sym"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable. {name: "Addr", argLength: 1, aux: "Sym"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable.
@ -284,8 +284,8 @@ var genericOps = []opData{
{name: "Move", argLength: 3, aux: "Int64"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size. Returns memory. {name: "Move", argLength: 3, aux: "Int64"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size. Returns memory.
{name: "Zero", argLength: 2, aux: "Int64"}, // arg0=destptr, arg1=mem, auxint=size. Returns memory. {name: "Zero", argLength: 2, aux: "Int64"}, // arg0=destptr, arg1=mem, auxint=size. Returns memory.
// Function calls. Arguments to the call have already been written to the stack. // Function calls. Arguments to the call have already been written to the stack.
// Return values appear on the stack. The method receiver, if any, is treated // Return values appear on the stack. The method receiver, if any, is treated
// as a phantom first argument. // as a phantom first argument.
{name: "ClosureCall", argLength: 3, aux: "Int64"}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. {name: "ClosureCall", argLength: 3, aux: "Int64"}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
{name: "StaticCall", argLength: 1, aux: "SymOff"}, // call function aux.(*gc.Sym), arg0=memory. auxint=arg size. Returns memory. {name: "StaticCall", argLength: 1, aux: "SymOff"}, // call function aux.(*gc.Sym), arg0=memory. auxint=arg size. Returns memory.
@ -368,17 +368,17 @@ var genericOps = []opData{
{name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct. {name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
{name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field. {name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.
// Spill&restore ops for the register allocator. These are // Spill&restore ops for the register allocator. These are
// semantically identical to OpCopy; they do not take/return // semantically identical to OpCopy; they do not take/return
// stores like regular memory ops do. We can get away without memory // stores like regular memory ops do. We can get away without memory
// args because we know there is no aliasing of spill slots on the stack. // args because we know there is no aliasing of spill slots on the stack.
{name: "StoreReg", argLength: 1}, {name: "StoreReg", argLength: 1},
{name: "LoadReg", argLength: 1}, {name: "LoadReg", argLength: 1},
// Used during ssa construction. Like Copy, but the arg has not been specified yet. // Used during ssa construction. Like Copy, but the arg has not been specified yet.
{name: "FwdRef"}, {name: "FwdRef"},
// Unknown value. Used for Values whose values don't matter because they are dead code. // Unknown value. Used for Values whose values don't matter because they are dead code.
{name: "Unknown"}, {name: "Unknown"},
{name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem"}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem {name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem"}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem

View File

@ -149,8 +149,8 @@ func genOp() {
} }
fmt.Fprintln(w, "reg:regInfo{") fmt.Fprintln(w, "reg:regInfo{")
// Compute input allocation order. We allocate from the // Compute input allocation order. We allocate from the
// most to the least constrained input. This order guarantees // most to the least constrained input. This order guarantees
// that we will always be able to find a register. // that we will always be able to find a register.
var s []intPair var s []intPair
for i, r := range v.reg.inputs { for i, r := range v.reg.inputs {

View File

@ -39,8 +39,8 @@ import (
// variable ::= some token // variable ::= some token
// opcode ::= one of the opcodes from ../op.go (without the Op prefix) // opcode ::= one of the opcodes from ../op.go (without the Op prefix)
// extra conditions is just a chunk of Go that evaluates to a boolean. It may use // extra conditions is just a chunk of Go that evaluates to a boolean. It may use
// variables declared in the matching sexpr. The variable "v" is predefined to be // variables declared in the matching sexpr. The variable "v" is predefined to be
// the value matched by the entire rule. // the value matched by the entire rule.
// If multiple rules match, the first one in file order is selected. // If multiple rules match, the first one in file order is selected.
@ -93,8 +93,8 @@ func genRules(arch arch) {
lineno++ lineno++
line := scanner.Text() line := scanner.Text()
if i := strings.Index(line, "//"); i >= 0 { if i := strings.Index(line, "//"); i >= 0 {
// Remove comments. Note that this isn't string safe, so // Remove comments. Note that this isn't string safe, so
// it will truncate lines with // inside strings. Oh well. // it will truncate lines with // inside strings. Oh well.
line = line[:i] line = line[:i]
} }
rule += " " + line rule += " " + line
@ -159,7 +159,7 @@ func genRules(arch arch) {
fmt.Fprintf(w, "return false\n") fmt.Fprintf(w, "return false\n")
fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "}\n")
// Generate a routine per op. Note that we don't make one giant routine // Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers. // because it is too big for some compilers.
for _, op := range ops { for _, op := range ops {
fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch)) fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch))
@ -190,7 +190,7 @@ func genRules(arch arch) {
fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "}\n")
} }
// Generate block rewrite function. There are only a few block types // Generate block rewrite function. There are only a few block types
// so we can make this one function with a switch. // so we can make this one function with a switch.
fmt.Fprintf(w, "func rewriteBlock%s(b *Block) bool {\n", arch.name) fmt.Fprintf(w, "func rewriteBlock%s(b *Block) bool {\n", arch.name)
fmt.Fprintf(w, "switch b.Kind {\n") fmt.Fprintf(w, "switch b.Kind {\n")
@ -229,7 +229,7 @@ func genRules(arch arch) {
fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond) fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond)
} }
// Rule matches. Generate result. // Rule matches. Generate result.
t := split(result[1 : len(result)-1]) // remove parens, then split t := split(result[1 : len(result)-1]) // remove parens, then split
newsuccs := t[2:] newsuccs := t[2:]
@ -316,7 +316,7 @@ func genMatch(w io.Writer, arch arch, match string) {
func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top bool) { func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top bool) {
if match[0] != '(' { if match[0] != '(' {
if _, ok := m[match]; ok { if _, ok := m[match]; ok {
// variable already has a definition. Check whether // variable already has a definition. Check whether
// the old definition and the new definition match. // the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality // For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering). // on Values (so cse is important to do before lowering).
@ -332,7 +332,7 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top
return return
} }
// split body up into regions. Split by spaces/tabs, except those // split body up into regions. Split by spaces/tabs, except those
// contained in () or {}. // contained in () or {}.
s := split(match[1 : len(match)-1]) // remove parens, then split s := split(match[1 : len(match)-1]) // remove parens, then split
@ -348,7 +348,7 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top
// type restriction // type restriction
t := a[1 : len(a)-1] // remove <> t := a[1 : len(a)-1] // remove <>
if !isVariable(t) { if !isVariable(t) {
// code. We must match the results of this code. // code. We must match the results of this code.
fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t) fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
} else { } else {
// variable // variable

View File

@ -76,7 +76,7 @@ func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction Br
func likelyadjust(f *Func) { func likelyadjust(f *Func) {
// The values assigned to certain and local only matter // The values assigned to certain and local only matter
// in their rank order. 0 is default, more positive // in their rank order. 0 is default, more positive
// is less likely. It's possible to assign a negative // is less likely. It's possible to assign a negative
// unlikeliness (though not currently the case). // unlikeliness (though not currently the case).
certain := make([]int8, f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit certain := make([]int8, f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit
local := make([]int8, f.NumBlocks()) // for our immediate predecessors. local := make([]int8, f.NumBlocks()) // for our immediate predecessors.
@ -113,7 +113,7 @@ func likelyadjust(f *Func) {
// Notice that this can act like a "reset" on unlikeliness at loops; the // Notice that this can act like a "reset" on unlikeliness at loops; the
// default "everything returns" unlikeliness is erased by min with the // default "everything returns" unlikeliness is erased by min with the
// backedge likeliness; however a loop with calls on every path will be // backedge likeliness; however a loop with calls on every path will be
// tagged with call cost. Net effect is that loop entry is favored. // tagged with call cost. Net effect is that loop entry is favored.
b0 := b.Succs[0].ID b0 := b.Succs[0].ID
b1 := b.Succs[1].ID b1 := b.Succs[1].ID
certain[b.ID] = min8(certain[b0], certain[b1]) certain[b.ID] = min8(certain[b0], certain[b1])
@ -204,7 +204,7 @@ func (l *loop) LongString() string {
// nearestOuterLoop returns the outer loop of loop most nearly // nearestOuterLoop returns the outer loop of loop most nearly
// containing block b; the header must dominate b. loop itself // containing block b; the header must dominate b. loop itself
// is assumed to not be that loop. For acceptable performance, // is assumed to not be that loop. For acceptable performance,
// we're relying on loop nests to not be terribly deep. // we're relying on loop nests to not be terribly deep.
func (l *loop) nearestOuterLoop(sdom sparseTree, b *Block) *loop { func (l *loop) nearestOuterLoop(sdom sparseTree, b *Block) *loop {
var o *loop var o *loop

View File

@ -6,7 +6,7 @@ package ssa
// A copy of the code in ../gc/subr.go. // A copy of the code in ../gc/subr.go.
// We can't use it directly because it would generate // We can't use it directly because it would generate
// an import cycle. TODO: move to a common support package. // an import cycle. TODO: move to a common support package.
// argument passing to/from // argument passing to/from
// smagic and umagic // smagic and umagic

View File

@ -43,7 +43,7 @@ func nilcheckelim(f *Func) {
work = append(work, bp{block: f.Entry}) work = append(work, bp{block: f.Entry})
// map from value ID to bool indicating if value is known to be non-nil // map from value ID to bool indicating if value is known to be non-nil
// in the current dominator path being walked. This slice is updated by // in the current dominator path being walked. This slice is updated by
// walkStates to maintain the known non-nil values. // walkStates to maintain the known non-nil values.
nonNilValues := make([]bool, f.NumValues()) nonNilValues := make([]bool, f.NumValues())

View File

@ -52,7 +52,7 @@ const (
auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff
) )
// A ValAndOff is used by the several opcodes. It holds // A ValAndOff is used by the several opcodes. It holds
// both a value and a pointer offset. // both a value and a pointer offset.
// A ValAndOff is intended to be encoded into an AuxInt field. // A ValAndOff is intended to be encoded into an AuxInt field.
// The zero ValAndOff encodes a value of 0 and an offset of 0. // The zero ValAndOff encodes a value of 0 and an offset of 0.

View File

@ -5,8 +5,8 @@
package ssa package ssa
// phielim eliminates redundant phi values from f. // phielim eliminates redundant phi values from f.
// A phi is redundant if its arguments are all equal. For // A phi is redundant if its arguments are all equal. For
// purposes of counting, ignore the phi itself. Both of // purposes of counting, ignore the phi itself. Both of
// these phis are redundant: // these phis are redundant:
// v = phi(x,x,x) // v = phi(x,x,x)
// v = phi(x,v,x,v) // v = phi(x,v,x,v)
@ -58,8 +58,8 @@ func phielimValue(v *Value) bool {
} }
if w == nil { if w == nil {
// v references only itself. It must be in // v references only itself. It must be in
// a dead code loop. Don't bother modifying it. // a dead code loop. Don't bother modifying it.
return false return false
} }
v.Op = OpCopy v.Op = OpCopy

View File

@ -4,9 +4,9 @@
// Register allocation. // Register allocation.
// //
// We use a version of a linear scan register allocator. We treat the // We use a version of a linear scan register allocator. We treat the
// whole function as a single long basic block and run through // whole function as a single long basic block and run through
// it using a greedy register allocator. Then all merge edges // it using a greedy register allocator. Then all merge edges
// (those targeting a block with len(Preds)>1) are processed to // (those targeting a block with len(Preds)>1) are processed to
// shuffle data into the place that the target of the edge expects. // shuffle data into the place that the target of the edge expects.
// //
@ -15,7 +15,7 @@
// value whose next use is farthest in the future. // value whose next use is farthest in the future.
// //
// The register allocator requires that a block is not scheduled until // The register allocator requires that a block is not scheduled until
// at least one of its predecessors have been scheduled. The most recent // at least one of its predecessors have been scheduled. The most recent
// such predecessor provides the starting register state for a block. // such predecessor provides the starting register state for a block.
// //
// It also requires that there are no critical edges (critical = // It also requires that there are no critical edges (critical =
@ -29,28 +29,28 @@
// For every value, we generate a spill immediately after the value itself. // For every value, we generate a spill immediately after the value itself.
// x = Op y z : AX // x = Op y z : AX
// x2 = StoreReg x // x2 = StoreReg x
// While AX still holds x, any uses of x will use that value. When AX is needed // While AX still holds x, any uses of x will use that value. When AX is needed
// for another value, we simply reuse AX. Spill code has already been generated // for another value, we simply reuse AX. Spill code has already been generated
// so there is no code generated at "spill" time. When x is referenced // so there is no code generated at "spill" time. When x is referenced
// subsequently, we issue a load to restore x to a register using x2 as // subsequently, we issue a load to restore x to a register using x2 as
// its argument: // its argument:
// x3 = Restore x2 : CX // x3 = Restore x2 : CX
// x3 can then be used wherever x is referenced again. // x3 can then be used wherever x is referenced again.
// If the spill (x2) is never used, it will be removed at the end of regalloc. // If the spill (x2) is never used, it will be removed at the end of regalloc.
// //
// Phi values are special, as always. We define two kinds of phis, those // Phi values are special, as always. We define two kinds of phis, those
// where the merge happens in a register (a "register" phi) and those where // where the merge happens in a register (a "register" phi) and those where
// the merge happens in a stack location (a "stack" phi). // the merge happens in a stack location (a "stack" phi).
// //
// A register phi must have the phi and all of its inputs allocated to the // A register phi must have the phi and all of its inputs allocated to the
// same register. Register phis are spilled similarly to regular ops: // same register. Register phis are spilled similarly to regular ops:
// b1: y = ... : AX b2: z = ... : AX // b1: y = ... : AX b2: z = ... : AX
// goto b3 goto b3 // goto b3 goto b3
// b3: x = phi(y, z) : AX // b3: x = phi(y, z) : AX
// x2 = StoreReg x // x2 = StoreReg x
// //
// A stack phi must have the phi and all of its inputs allocated to the same // A stack phi must have the phi and all of its inputs allocated to the same
// stack location. Stack phis start out life already spilled - each phi // stack location. Stack phis start out life already spilled - each phi
// input must be a store (using StoreReg) at the end of the corresponding // input must be a store (using StoreReg) at the end of the corresponding
// predecessor block. // predecessor block.
// b1: y = ... : AX b2: z = ... : BX // b1: y = ... : AX b2: z = ... : BX
@ -64,12 +64,12 @@
// TODO // TODO
// Use an affinity graph to mark two values which should use the // Use an affinity graph to mark two values which should use the
// same register. This affinity graph will be used to prefer certain // same register. This affinity graph will be used to prefer certain
// registers for allocation. This affinity helps eliminate moves that // registers for allocation. This affinity helps eliminate moves that
// are required for phi implementations and helps generate allocations // are required for phi implementations and helps generate allocations
// for 2-register architectures. // for 2-register architectures.
// Note: regalloc generates a not-quite-SSA output. If we have: // Note: regalloc generates a not-quite-SSA output. If we have:
// //
// b1: x = ... : AX // b1: x = ... : AX
// x2 = StoreReg x // x2 = StoreReg x
@ -85,8 +85,8 @@
// add a x4:CX->BX copy at the end of b4. // add a x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really // But the definition of x3 doesn't dominate b2. We should really
// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep // insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// SSA form. For now, we ignore this problem as remaining in strict // SSA form. For now, we ignore this problem as remaining in strict
// SSA form isn't needed after regalloc. We'll just leave the use // SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy // of x3 not dominated by the definition of x3, and the CX->BX copy
// will have no use (so don't run deadcode after regalloc!). // will have no use (so don't run deadcode after regalloc!).
// TODO: maybe we should introduce these extra phis? // TODO: maybe we should introduce these extra phis?
@ -102,7 +102,7 @@ import (
const regDebug = false // TODO: compiler flag const regDebug = false // TODO: compiler flag
const logSpills = false const logSpills = false
// regalloc performs register allocation on f. It sets f.RegAlloc // regalloc performs register allocation on f. It sets f.RegAlloc
// to the resulting allocation. // to the resulting allocation.
func regalloc(f *Func) { func regalloc(f *Func) {
var s regAllocState var s regAllocState
@ -276,7 +276,7 @@ type startReg struct {
vid ID // pre-regalloc value needed in this register vid ID // pre-regalloc value needed in this register
} }
// freeReg frees up register r. Any current user of r is kicked out. // freeReg frees up register r. Any current user of r is kicked out.
func (s *regAllocState) freeReg(r register) { func (s *regAllocState) freeReg(r register) {
v := s.regs[r].v v := s.regs[r].v
if v == nil { if v == nil {
@ -355,18 +355,18 @@ func (s *regAllocState) allocReg(v *Value, mask regMask) register {
return pickReg(mask) return pickReg(mask)
} }
// Pick a value to spill. Spill the value with the // Pick a value to spill. Spill the value with the
// farthest-in-the-future use. // farthest-in-the-future use.
// TODO: Prefer registers with already spilled Values? // TODO: Prefer registers with already spilled Values?
// TODO: Modify preference using affinity graph. // TODO: Modify preference using affinity graph.
// TODO: if a single value is in multiple registers, spill one of them // TODO: if a single value is in multiple registers, spill one of them
// before spilling a value in just a single register. // before spilling a value in just a single register.
// SP and SB are allocated specially. No regular value should // SP and SB are allocated specially. No regular value should
// be allocated to them. // be allocated to them.
mask &^= 1<<4 | 1<<32 mask &^= 1<<4 | 1<<32
// Find a register to spill. We spill the register containing the value // Find a register to spill. We spill the register containing the value
// whose next use is as far in the future as possible. // whose next use is as far in the future as possible.
// https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm // https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
var r register var r register
@ -378,7 +378,7 @@ func (s *regAllocState) allocReg(v *Value, mask regMask) register {
v := s.regs[t].v v := s.regs[t].v
if n := s.values[v.ID].uses.dist; n > maxuse { if n := s.values[v.ID].uses.dist; n > maxuse {
// v's next use is farther in the future than any value // v's next use is farther in the future than any value
// we've seen so far. A new best spill candidate. // we've seen so far. A new best spill candidate.
r = t r = t
maxuse = n maxuse = n
} }
@ -476,7 +476,7 @@ func (s *regAllocState) init(f *Func) {
} }
s.computeLive() s.computeLive()
// Compute block order. This array allows us to distinguish forward edges // Compute block order. This array allows us to distinguish forward edges
// from backward edges and compute how far they go. // from backward edges and compute how far they go.
blockOrder := make([]int32, f.NumBlocks()) blockOrder := make([]int32, f.NumBlocks())
for i, b := range f.Blocks { for i, b := range f.Blocks {
@ -589,7 +589,7 @@ func (s *regAllocState) regalloc(f *Func) {
liveSet.remove(v.ID) liveSet.remove(v.ID)
if v.Op == OpPhi { if v.Op == OpPhi {
// Remove v from the live set, but don't add // Remove v from the live set, but don't add
// any inputs. This is the state the len(b.Preds)>1 // any inputs. This is the state the len(b.Preds)>1
// case below desires; it wants to process phis specially. // case below desires; it wants to process phis specially.
continue continue
} }
@ -653,7 +653,7 @@ func (s *regAllocState) regalloc(f *Func) {
} }
} }
} else { } else {
// This is the complicated case. We have more than one predecessor, // This is the complicated case. We have more than one predecessor,
// which means we may have Phi ops. // which means we may have Phi ops.
// Copy phi ops into new schedule. // Copy phi ops into new schedule.
@ -674,7 +674,7 @@ func (s *regAllocState) regalloc(f *Func) {
} }
} }
// Decide on registers for phi ops. Use the registers determined // Decide on registers for phi ops. Use the registers determined
// by the primary predecessor if we can. // by the primary predecessor if we can.
// TODO: pick best of (already processed) predecessors? // TODO: pick best of (already processed) predecessors?
// Majority vote? Deepest nesting level? // Majority vote? Deepest nesting level?
@ -728,7 +728,7 @@ func (s *regAllocState) regalloc(f *Func) {
} }
} }
// Set registers for phis. Add phi spill code. // Set registers for phis. Add phi spill code.
for i, v := range phis { for i, v := range phis {
if !s.values[v.ID].needReg { if !s.values[v.ID].needReg {
continue continue
@ -861,8 +861,8 @@ func (s *regAllocState) regalloc(f *Func) {
continue continue
} }
if v.Op == OpArg { if v.Op == OpArg {
// Args are "pre-spilled" values. We don't allocate // Args are "pre-spilled" values. We don't allocate
// any register here. We just set up the spill pointer to // any register here. We just set up the spill pointer to
// point at itself and any later user will restore it to use it. // point at itself and any later user will restore it to use it.
s.values[v.ID].spill = v s.values[v.ID].spill = v
s.values[v.ID].spillUsed = true // use is guaranteed s.values[v.ID].spillUsed = true // use is guaranteed
@ -886,7 +886,7 @@ func (s *regAllocState) regalloc(f *Func) {
continue continue
} }
// Move arguments to registers. Process in an ordering defined // Move arguments to registers. Process in an ordering defined
// by the register specification (most constrained first). // by the register specification (most constrained first).
args = append(args[:0], v.Args...) args = append(args[:0], v.Args...)
for _, i := range regspec.inputs { for _, i := range regspec.inputs {
@ -926,7 +926,7 @@ func (s *regAllocState) regalloc(f *Func) {
} }
b.Values = append(b.Values, v) b.Values = append(b.Values, v)
// Issue a spill for this value. We issue spills unconditionally, // Issue a spill for this value. We issue spills unconditionally,
// then at the end of regalloc delete the ones we never use. // then at the end of regalloc delete the ones we never use.
// TODO: schedule the spill at a point that dominates all restores. // TODO: schedule the spill at a point that dominates all restores.
// The restore may be off in an unlikely branch somewhere and it // The restore may be off in an unlikely branch somewhere and it
@ -1002,7 +1002,7 @@ func (s *regAllocState) regalloc(f *Func) {
// If a value is live at the end of the block and // If a value is live at the end of the block and
// isn't in a register, remember that its spill location // isn't in a register, remember that its spill location
// is live. We need to remember this information so that // is live. We need to remember this information so that
// the liveness analysis in stackalloc is correct. // the liveness analysis in stackalloc is correct.
for _, e := range s.live[b.ID] { for _, e := range s.live[b.ID] {
if s.values[e.ID].regs != 0 { if s.values[e.ID].regs != 0 {
@ -1201,7 +1201,7 @@ func (e *edgeState) process() {
} }
} }
if i < len(dsts) { if i < len(dsts) {
// Made some progress. Go around again. // Made some progress. Go around again.
dsts = dsts[:i] dsts = dsts[:i]
// Append any extras destinations we generated. // Append any extras destinations we generated.
@ -1210,7 +1210,7 @@ func (e *edgeState) process() {
continue continue
} }
// We made no progress. That means that any // We made no progress. That means that any
// remaining unsatisfied moves are in simple cycles. // remaining unsatisfied moves are in simple cycles.
// For example, A -> B -> C -> D -> A. // For example, A -> B -> C -> D -> A.
// A ----> B // A ----> B
@ -1229,7 +1229,7 @@ func (e *edgeState) process() {
// When we resume the outer loop, the A->B move can now proceed, // When we resume the outer loop, the A->B move can now proceed,
// and eventually the whole cycle completes. // and eventually the whole cycle completes.
// Copy any cycle location to a temp register. This duplicates // Copy any cycle location to a temp register. This duplicates
// one of the cycle entries, allowing the just duplicated value // one of the cycle entries, allowing the just duplicated value
// to be overwritten and the cycle to proceed. // to be overwritten and the cycle to proceed.
loc := dsts[0].loc loc := dsts[0].loc
@ -1248,7 +1248,7 @@ func (e *edgeState) process() {
} }
} }
// processDest generates code to put value vid into location loc. Returns true // processDest generates code to put value vid into location loc. Returns true
// if progress was made. // if progress was made.
func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool { func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
occupant := e.contents[loc] occupant := e.contents[loc]
@ -1258,7 +1258,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
if splice != nil { if splice != nil {
*splice = occupant.c *splice = occupant.c
} }
// Note: if splice==nil then c will appear dead. This is // Note: if splice==nil then c will appear dead. This is
// non-SSA formed code, so be careful after this pass not to run // non-SSA formed code, so be careful after this pass not to run
// deadcode elimination. // deadcode elimination.
return true return true
@ -1306,7 +1306,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
if dstReg { if dstReg {
x = v.copyInto(e.p) x = v.copyInto(e.p)
} else { } else {
// Rematerialize into stack slot. Need a free // Rematerialize into stack slot. Need a free
// register to accomplish this. // register to accomplish this.
e.erase(loc) // see pre-clobber comment below e.erase(loc) // see pre-clobber comment below
r := e.findRegFor(v.Type) r := e.findRegFor(v.Type)
@ -1330,15 +1330,15 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
if dstReg { if dstReg {
x = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) x = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c)
} else { } else {
// mem->mem. Use temp register. // mem->mem. Use temp register.
// Pre-clobber destination. This avoids the // Pre-clobber destination. This avoids the
// following situation: // following situation:
// - v is currently held in R0 and stacktmp0. // - v is currently held in R0 and stacktmp0.
// - We want to copy stacktmp1 to stacktmp0. // - We want to copy stacktmp1 to stacktmp0.
// - We choose R0 as the temporary register. // - We choose R0 as the temporary register.
// During the copy, both R0 and stacktmp0 are // During the copy, both R0 and stacktmp0 are
// clobbered, losing both copies of v. Oops! // clobbered, losing both copies of v. Oops!
// Erasing the destination early means R0 will not // Erasing the destination early means R0 will not
// be chosen as the temp register, as it will then // be chosen as the temp register, as it will then
// be the last copy of v. // be the last copy of v.
@ -1438,7 +1438,7 @@ func (e *edgeState) findRegFor(typ Type) Location {
m = e.s.compatRegs(e.s.f.Config.fe.TypeInt64()) m = e.s.compatRegs(e.s.f.Config.fe.TypeInt64())
} }
// Pick a register. In priority order: // Pick a register. In priority order:
// 1) an unused register // 1) an unused register
// 2) a non-unique register not holding a final value // 2) a non-unique register not holding a final value
// 3) a non-unique register // 3) a non-unique register
@ -1455,9 +1455,9 @@ func (e *edgeState) findRegFor(typ Type) Location {
return &registers[pickReg(x)] return &registers[pickReg(x)]
} }
// No register is available. Allocate a temp location to spill a register to. // No register is available. Allocate a temp location to spill a register to.
// The type of the slot is immaterial - it will not be live across // The type of the slot is immaterial - it will not be live across
// any safepoint. Just use a type big enough to hold any register. // any safepoint. Just use a type big enough to hold any register.
typ = e.s.f.Config.fe.TypeInt64() typ = e.s.f.Config.fe.TypeInt64()
t := LocalSlot{e.s.f.Config.fe.Auto(typ), typ, 0} t := LocalSlot{e.s.f.Config.fe.Auto(typ), typ, 0}
// TODO: reuse these slots. // TODO: reuse these slots.
@ -1471,7 +1471,7 @@ func (e *edgeState) findRegFor(typ Type) Location {
if regDebug { if regDebug {
fmt.Printf(" SPILL %s->%s %s\n", r.Name(), t.Name(), x.LongString()) fmt.Printf(" SPILL %s->%s %s\n", r.Name(), t.Name(), x.LongString())
} }
// r will now be overwritten by the caller. At some point // r will now be overwritten by the caller. At some point
// later, the newly saved value will be moved back to its // later, the newly saved value will be moved back to its
// final destination in processDest. // final destination in processDest.
return r return r
@ -1508,10 +1508,10 @@ type liveInfo struct {
} }
// computeLive computes a map from block ID to a list of value IDs live at the end // computeLive computes a map from block ID to a list of value IDs live at the end
// of that block. Together with the value ID is a count of how many instructions // of that block. Together with the value ID is a count of how many instructions
// to the next use of that value. The resulting map is stored at s.live. // to the next use of that value. The resulting map is stored at s.live.
// TODO: this could be quadratic if lots of variables are live across lots of // TODO: this could be quadratic if lots of variables are live across lots of
// basic blocks. Figure out a way to make this function (or, more precisely, the user // basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time. // of this function) require only linear size & time.
func (s *regAllocState) computeLive() { func (s *regAllocState) computeLive() {
f := s.f f := s.f

View File

@ -105,7 +105,7 @@ func addOff(x, y int64) int64 {
return z return z
} }
// mergeSym merges two symbolic offsets. There is no real merging of // mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one. // offsets, we just pick the non-nil one.
func mergeSym(x, y interface{}) interface{} { func mergeSym(x, y interface{}) interface{} {
if x == nil { if x == nil {

View File

@ -15,10 +15,10 @@ const (
ScoreCount // not a real score ScoreCount // not a real score
) )
// Schedule the Values in each Block. After this phase returns, the // Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values // order of b.Values matters and is the order in which those values
// will appear in the assembly output. For now it generates a // will appear in the assembly output. For now it generates a
// reasonable valid schedule using a priority queue. TODO(khr): // reasonable valid schedule using a priority queue. TODO(khr):
// schedule smarter. // schedule smarter.
func schedule(f *Func) { func schedule(f *Func) {
// For each value, the number of times it is used in the block // For each value, the number of times it is used in the block
@ -28,7 +28,7 @@ func schedule(f *Func) {
// "priority" for a value // "priority" for a value
score := make([]uint8, f.NumValues()) score := make([]uint8, f.NumValues())
// scheduling order. We queue values in this list in reverse order. // scheduling order. We queue values in this list in reverse order.
var order []*Value var order []*Value
// priority queue of legally schedulable (0 unscheduled uses) values // priority queue of legally schedulable (0 unscheduled uses) values
@ -36,7 +36,7 @@ func schedule(f *Func) {
// maps mem values to the next live memory value // maps mem values to the next live memory value
nextMem := make([]*Value, f.NumValues()) nextMem := make([]*Value, f.NumValues())
// additional pretend arguments for each Value. Used to enforce load/store ordering. // additional pretend arguments for each Value. Used to enforce load/store ordering.
additionalArgs := make([][]*Value, f.NumValues()) additionalArgs := make([][]*Value, f.NumValues())
for _, b := range f.Blocks { for _, b := range f.Blocks {
@ -77,12 +77,12 @@ func schedule(f *Func) {
uses[v.ID]++ uses[v.ID]++
} }
} }
// Compute score. Larger numbers are scheduled closer to the end of the block. // Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values { for _, v := range b.Values {
switch { switch {
case v.Op == OpAMD64LoweredGetClosurePtr: case v.Op == OpAMD64LoweredGetClosurePtr:
// We also score GetLoweredClosurePtr as early as possible to ensure that the // We also score GetLoweredClosurePtr as early as possible to ensure that the
// context register is not stomped. GetLoweredClosurePtr should only appear // context register is not stomped. GetLoweredClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no // in the entry block where there are no phi functions, so there is no
// conflict or ambiguity here. // conflict or ambiguity here.
if b != f.Entry { if b != f.Entry {
@ -96,8 +96,8 @@ func schedule(f *Func) {
// We want all the vardefs next. // We want all the vardefs next.
score[v.ID] = ScoreVarDef score[v.ID] = ScoreVarDef
case v.Type.IsMemory(): case v.Type.IsMemory():
// Schedule stores as early as possible. This tends to // Schedule stores as early as possible. This tends to
// reduce register pressure. It also helps make sure // reduce register pressure. It also helps make sure
// VARDEF ops are scheduled before the corresponding LEA. // VARDEF ops are scheduled before the corresponding LEA.
score[v.ID] = ScoreMemory score[v.ID] = ScoreMemory
case v.Type.IsFlags(): case v.Type.IsFlags():
@ -117,7 +117,7 @@ func schedule(f *Func) {
// Schedule values dependent on the control value at the end. // Schedule values dependent on the control value at the end.
// This reduces the number of register spills. We don't find // This reduces the number of register spills. We don't find
// all values that depend on the control, just values with a // all values that depend on the control, just values with a
// direct dependency. This is cheaper and in testing there // direct dependency. This is cheaper and in testing there
// was no difference in the number of spills. // was no difference in the number of spills.
for _, v := range b.Values { for _, v := range b.Values {
if v.Op != OpPhi { if v.Op != OpPhi {

View File

@ -99,7 +99,7 @@ func (t sparseTree) numberBlock(b *Block, n int32) int32 {
// Sibling returns a sibling of x in the dominator tree (i.e., // Sibling returns a sibling of x in the dominator tree (i.e.,
// a node with the same immediate dominator) or nil if there // a node with the same immediate dominator) or nil if there
// are no remaining siblings in the arbitrary but repeatable // are no remaining siblings in the arbitrary but repeatable
// order chosen. Because the Child-Sibling order is used // order chosen. Because the Child-Sibling order is used
// to assign entry and exit numbers in the treewalk, those // to assign entry and exit numbers in the treewalk, those
// numbers are also consistent with this order (i.e., // numbers are also consistent with this order (i.e.,
// Sibling(x) has entry number larger than x's exit number). // Sibling(x) has entry number larger than x's exit number).
@ -108,7 +108,7 @@ func (t sparseTree) Sibling(x *Block) *Block {
} }
// Child returns a child of x in the dominator tree, or // Child returns a child of x in the dominator tree, or
// nil if there are none. The choice of first child is // nil if there are none. The choice of first child is
// arbitrary but repeatable. // arbitrary but repeatable.
func (t sparseTree) Child(x *Block) *Block { func (t sparseTree) Child(x *Block) *Block {
return t[x.ID].child return t[x.ID].child

View File

@ -91,8 +91,8 @@ func (s *stackAllocState) stackalloc() {
// For each type, we keep track of all the stack slots we // For each type, we keep track of all the stack slots we
// have allocated for that type. // have allocated for that type.
// TODO: share slots among equivalent types. We would need to // TODO: share slots among equivalent types. We would need to
// only share among types with the same GC signature. See the // only share among types with the same GC signature. See the
// type.Equal calls below for where this matters. // type.Equal calls below for where this matters.
locations := map[Type][]LocalSlot{} locations := map[Type][]LocalSlot{}
@ -177,7 +177,7 @@ func (s *stackAllocState) stackalloc() {
// computeLive computes a map from block ID to a list of // computeLive computes a map from block ID to a list of
// stack-slot-needing value IDs live at the end of that block. // stack-slot-needing value IDs live at the end of that block.
// TODO: this could be quadratic if lots of variables are live across lots of // TODO: this could be quadratic if lots of variables are live across lots of
// basic blocks. Figure out a way to make this function (or, more precisely, the user // basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time. // of this function) require only linear size & time.
func (s *stackAllocState) computeLive(spillLive [][]ID) { func (s *stackAllocState) computeLive(spillLive [][]ID) {
s.live = make([][]ID, s.f.NumBlocks()) s.live = make([][]ID, s.f.NumBlocks())
@ -206,7 +206,7 @@ func (s *stackAllocState) computeLive(spillLive [][]ID) {
if v.Op == OpPhi { if v.Op == OpPhi {
// Save phi for later. // Save phi for later.
// Note: its args might need a stack slot even though // Note: its args might need a stack slot even though
// the phi itself doesn't. So don't use needSlot. // the phi itself doesn't. So don't use needSlot.
if !v.Type.IsMemory() && !v.Type.IsVoid() { if !v.Type.IsMemory() && !v.Type.IsVoid() {
phis = append(phis, v) phis = append(phis, v)
} }
@ -299,7 +299,7 @@ func (s *stackAllocState) buildInterferenceGraph() {
if v.Op == OpArg && s.values[v.ID].needSlot { if v.Op == OpArg && s.values[v.ID].needSlot {
// OpArg is an input argument which is pre-spilled. // OpArg is an input argument which is pre-spilled.
// We add back v.ID here because we want this value // We add back v.ID here because we want this value
// to appear live even before this point. Being live // to appear live even before this point. Being live
// all the way to the start of the entry block prevents other // all the way to the start of the entry block prevents other
// values from being allocated to the same slot and clobbering // values from being allocated to the same slot and clobbering
// the input value before we have a chance to load it. // the input value before we have a chance to load it.

View File

@ -10,21 +10,21 @@ import (
) )
// A Value represents a value in the SSA representation of the program. // A Value represents a value in the SSA representation of the program.
// The ID and Type fields must not be modified. The remainder may be modified // The ID and Type fields must not be modified. The remainder may be modified
// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)). // if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
type Value struct { type Value struct {
// A unique identifier for the value. For performance we allocate these IDs // A unique identifier for the value. For performance we allocate these IDs
// densely starting at 1. There is no guarantee that there won't be occasional holes, though. // densely starting at 1. There is no guarantee that there won't be occasional holes, though.
ID ID ID ID
// The operation that computes this value. See op.go. // The operation that computes this value. See op.go.
Op Op Op Op
// The type of this value. Normally this will be a Go type, but there // The type of this value. Normally this will be a Go type, but there
// are a few other pseudo-types, see type.go. // are a few other pseudo-types, see type.go.
Type Type Type Type
// Auxiliary info for this value. The type of this information depends on the opcode and type. // Auxiliary info for this value. The type of this information depends on the opcode and type.
// AuxInt is used for integer values, Aux is used for other values. // AuxInt is used for integer values, Aux is used for other values.
AuxInt int64 AuxInt int64
Aux interface{} Aux interface{}
@ -49,7 +49,7 @@ type Value struct {
// OpConst int64 0 int64 constant // OpConst int64 0 int64 constant
// OpAddcq int64 1 amd64 op: v = arg[0] + constant // OpAddcq int64 1 amd64 op: v = arg[0] + constant
// short form print. Just v#. // short form print. Just v#.
func (v *Value) String() string { func (v *Value) String() string {
if v == nil { if v == nil {
return "nil" // should never happen, but not panicking helps with debugging return "nil" // should never happen, but not panicking helps with debugging

View File

@ -95,7 +95,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
split64(r, &lo2, &hi2) split64(r, &lo2, &hi2)
} }
// Do op. Leave result in DX:AX. // Do op. Leave result in DX:AX.
switch n.Op { switch n.Op {
// TODO: Constants // TODO: Constants
case gc.OADD: case gc.OADD:

View File

@ -1511,7 +1511,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
// The way the code generator uses floating-point // The way the code generator uses floating-point
// registers, a move from F0 to F0 is intended as a no-op. // registers, a move from F0 to F0 is intended as a no-op.
// On the x86, it's not: it pushes a second copy of F0 // On the x86, it's not: it pushes a second copy of F0
// on the floating point stack. So toss it away here. // on the floating point stack. So toss it away here.
// Also, F0 is the *only* register we ever evaluate // Also, F0 is the *only* register we ever evaluate
// into, so we should only see register/register as F0/F0. // into, so we should only see register/register as F0/F0.
/* /*

View File

@ -221,7 +221,7 @@ loop1:
// MOVSD removal. // MOVSD removal.
// We never use packed registers, so a MOVSD between registers // We never use packed registers, so a MOVSD between registers
// can be replaced by MOVAPD, which moves the pair of float64s // can be replaced by MOVAPD, which moves the pair of float64s
// instead of just the lower one. We only use the lower one, but // instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both. // the processor can do better if we do moves using both.
for r := g.Start; r != nil; r = r.Link { for r := g.Start; r != nil; r = r.Link {
p = r.Prog p = r.Prog

View File

@ -311,8 +311,8 @@ func (f *File) addImport(path string) string {
var slashslash = []byte("//") var slashslash = []byte("//")
// initialComments returns the prefix of content containing only // initialComments returns the prefix of content containing only
// whitespace and line comments. Any +build directives must appear // whitespace and line comments. Any +build directives must appear
// within this region. This approach is more reliable than using // within this region. This approach is more reliable than using
// go/printer to print a modified AST containing comments. // go/printer to print a modified AST containing comments.
// //
func initialComments(content []byte) []byte { func initialComments(content []byte) []byte {

View File

@ -284,7 +284,7 @@ func findgoversion() string {
} }
// The $GOROOT/VERSION.cache file is a cache to avoid invoking // The $GOROOT/VERSION.cache file is a cache to avoid invoking
// git every time we run this command. Unlike VERSION, it gets // git every time we run this command. Unlike VERSION, it gets
// deleted by the clean command. // deleted by the clean command.
path = pathf("%s/VERSION.cache", goroot) path = pathf("%s/VERSION.cache", goroot)
if isfile(path) { if isfile(path) {
@ -399,8 +399,8 @@ func setup() {
// Create object directory. // Create object directory.
// We keep it in pkg/ so that all the generated binaries // We keep it in pkg/ so that all the generated binaries
// are in one tree. If pkg/obj/libgc.a exists, it is a dreg from // are in one tree. If pkg/obj/libgc.a exists, it is a dreg from
// before we used subdirectories of obj. Delete all of obj // before we used subdirectories of obj. Delete all of obj
// to clean up. // to clean up.
if p := pathf("%s/pkg/obj/libgc.a", goroot); isfile(p) { if p := pathf("%s/pkg/obj/libgc.a", goroot); isfile(p) {
xremoveall(pathf("%s/pkg/obj", goroot)) xremoveall(pathf("%s/pkg/obj", goroot))

View File

@ -82,7 +82,7 @@ func mkzbootstrap(file string) {
} }
// stackGuardMultiplier returns a multiplier to apply to the default // stackGuardMultiplier returns a multiplier to apply to the default
// stack guard size. Larger multipliers are used for non-optimized // stack guard size. Larger multipliers are used for non-optimized
// builds that have larger stack frames. // builds that have larger stack frames.
func stackGuardMultiplier() int { func stackGuardMultiplier() int {
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") { for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {

View File

@ -378,7 +378,7 @@ func (t *tester) registerTests() {
}) })
// Test that internal linking of standard packages does not // Test that internal linking of standard packages does not
// require libgcc. This ensures that we can install a Go // require libgcc. This ensures that we can install a Go
// release on a system that does not have a C compiler // release on a system that does not have a C compiler
// installed and still build Go programs (that don't use cgo). // installed and still build Go programs (that don't use cgo).
for _, pkg := range cgoPackages { for _, pkg := range cgoPackages {

View File

@ -168,7 +168,7 @@ func processFile(filename string, useStdin bool) error {
// Print AST. We did that after each fix, so this appears // Print AST. We did that after each fix, so this appears
// redundant, but it is necessary to generate gofmt-compatible // redundant, but it is necessary to generate gofmt-compatible
// source code in a few cases. The official gofmt style is the // source code in a few cases. The official gofmt style is the
// output of the printer run on a standard AST generated by the parser, // output of the printer run on a standard AST generated by the parser,
// but the source we generated inside the loop above is the // but the source we generated inside the loop above is the
// output of the printer run on a mangled AST generated by a fixer. // output of the printer run on a mangled AST generated by a fixer.

View File

@ -18,9 +18,9 @@ import (
// The fact that it is partial is very important: the input is // The fact that it is partial is very important: the input is
// an AST and a description of some type information to // an AST and a description of some type information to
// assume about one or more packages, but not all the // assume about one or more packages, but not all the
// packages that the program imports. The checker is // packages that the program imports. The checker is
// expected to do as much as it can with what it has been // expected to do as much as it can with what it has been
// given. There is not enough information supplied to do // given. There is not enough information supplied to do
// a full type check, but the type checker is expected to // a full type check, but the type checker is expected to
// apply information that can be derived from variable // apply information that can be derived from variable
// declarations, function and method returns, and type switches // declarations, function and method returns, and type switches
@ -30,14 +30,14 @@ import (
// TODO(rsc,gri): Replace with go/typechecker. // TODO(rsc,gri): Replace with go/typechecker.
// Doing that could be an interesting test case for go/typechecker: // Doing that could be an interesting test case for go/typechecker:
// the constraints about working with partial information will // the constraints about working with partial information will
// likely exercise it in interesting ways. The ideal interface would // likely exercise it in interesting ways. The ideal interface would
// be to pass typecheck a map from importpath to package API text // be to pass typecheck a map from importpath to package API text
// (Go source code), but for now we use data structures (TypeConfig, Type). // (Go source code), but for now we use data structures (TypeConfig, Type).
// //
// The strings mostly use gofmt form. // The strings mostly use gofmt form.
// //
// A Field or FieldList has as its type a comma-separated list // A Field or FieldList has as its type a comma-separated list
// of the types of the fields. For example, the field list // of the types of the fields. For example, the field list
// x, y, z int // x, y, z int
// has type "int, int, int". // has type "int, int, int".
@ -242,7 +242,7 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a
// propagate the type to all the uses. // propagate the type to all the uses.
// The !isDecl case is a cheat here, but it makes // The !isDecl case is a cheat here, but it makes
// up in some cases for not paying attention to // up in some cases for not paying attention to
// struct fields. The real type checker will be // struct fields. The real type checker will be
// more accurate so we won't need the cheat. // more accurate so we won't need the cheat.
if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") { if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") {
typeof[id.Obj] = typ typeof[id.Obj] = typ
@ -367,7 +367,7 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a
typeof[n] = all typeof[n] = all
case *ast.ValueSpec: case *ast.ValueSpec:
// var declaration. Use type if present. // var declaration. Use type if present.
if n.Type != nil { if n.Type != nil {
t := typeof[n.Type] t := typeof[n.Type]
if !isType(t) { if !isType(t) {
@ -586,7 +586,7 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a
// Convert between function type strings and lists of types. // Convert between function type strings and lists of types.
// Using strings makes this a little harder, but it makes // Using strings makes this a little harder, but it makes
// a lot of the rest of the code easier. This will all go away // a lot of the rest of the code easier. This will all go away
// when we can use go/typechecker directly. // when we can use go/typechecker directly.
// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"]. // splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"].

View File

@ -517,7 +517,7 @@ func isMetaPackage(name string) bool {
} }
// libname returns the filename to use for the shared library when using // libname returns the filename to use for the shared library when using
// -buildmode=shared. The rules we use are: // -buildmode=shared. The rules we use are:
// Use arguments for special 'meta' packages: // Use arguments for special 'meta' packages:
// std --> libstd.so // std --> libstd.so
// std cmd --> libstd,cmd.so // std cmd --> libstd,cmd.so
@ -788,7 +788,7 @@ func goFilesPackage(gofiles []string) *Package {
// Synthesize fake "directory" that only shows the named files, // Synthesize fake "directory" that only shows the named files,
// to make it look like this is a standard package or // to make it look like this is a standard package or
// command directory. So that local imports resolve // command directory. So that local imports resolve
// consistently, the files must all be in the same directory. // consistently, the files must all be in the same directory.
var dirent []os.FileInfo var dirent []os.FileInfo
var dir string var dir string
@ -950,7 +950,7 @@ func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, looksha
// If we are not doing a cross-build, then record the binary we'll // If we are not doing a cross-build, then record the binary we'll
// generate for cgo as a dependency of the build of any package // generate for cgo as a dependency of the build of any package
// using cgo, to make sure we do not overwrite the binary while // using cgo, to make sure we do not overwrite the binary while
// a package is using it. If this is a cross-build, then the cgo we // a package is using it. If this is a cross-build, then the cgo we
// are writing is not the cgo we need to use. // are writing is not the cgo we need to use.
if goos == runtime.GOOS && goarch == runtime.GOARCH && !buildRace && !buildMSan { if goos == runtime.GOOS && goarch == runtime.GOARCH && !buildRace && !buildMSan {
if (len(p.CgoFiles) > 0 || p.Standard && p.ImportPath == "runtime/cgo") && !buildLinkshared && buildBuildmode != "shared" { if (len(p.CgoFiles) > 0 || p.Standard && p.ImportPath == "runtime/cgo") && !buildLinkshared && buildBuildmode != "shared" {
@ -986,7 +986,7 @@ func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, looksha
} }
if p.local && p.target == "" { if p.local && p.target == "" {
// Imported via local path. No permanent target. // Imported via local path. No permanent target.
mode = modeBuild mode = modeBuild
} }
work := p.pkgdir work := p.pkgdir
@ -1034,7 +1034,7 @@ func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, looksha
// the name will show up in ps listings. If the caller has specified // the name will show up in ps listings. If the caller has specified
// a name, use that instead of a.out. The binary is generated // a name, use that instead of a.out. The binary is generated
// in an otherwise empty subdirectory named exe to avoid // in an otherwise empty subdirectory named exe to avoid
// naming conflicts. The only possible conflict is if we were // naming conflicts. The only possible conflict is if we were
// to create a top-level package named exe. // to create a top-level package named exe.
name := "a.out" name := "a.out"
if p.exeName != "" { if p.exeName != "" {
@ -1224,10 +1224,10 @@ func (b *builder) do(root *action) {
// The original implementation here was a true queue // The original implementation here was a true queue
// (using a channel) but it had the effect of getting // (using a channel) but it had the effect of getting
// distracted by low-level leaf actions to the detriment // distracted by low-level leaf actions to the detriment
// of completing higher-level actions. The order of // of completing higher-level actions. The order of
// work does not matter much to overall execution time, // work does not matter much to overall execution time,
// but when running "go test std" it is nice to see each test // but when running "go test std" it is nice to see each test
// results as soon as possible. The priorities assigned // results as soon as possible. The priorities assigned
// ensure that, all else being equal, the execution prefers // ensure that, all else being equal, the execution prefers
// to do what it would have done first in a simple depth-first // to do what it would have done first in a simple depth-first
// dependency order traversal. // dependency order traversal.
@ -1547,7 +1547,7 @@ func (b *builder) build(a *action) (err error) {
// NOTE(rsc): On Windows, it is critically important that the // NOTE(rsc): On Windows, it is critically important that the
// gcc-compiled objects (cgoObjects) be listed after the ordinary // gcc-compiled objects (cgoObjects) be listed after the ordinary
// objects in the archive. I do not know why this is. // objects in the archive. I do not know why this is.
// https://golang.org/issue/2601 // https://golang.org/issue/2601
objects = append(objects, cgoObjects...) objects = append(objects, cgoObjects...)
@ -1653,7 +1653,7 @@ func (b *builder) install(a *action) (err error) {
} }
// remove object dir to keep the amount of // remove object dir to keep the amount of
// garbage down in a large build. On an operating system // garbage down in a large build. On an operating system
// with aggressive buffering, cleaning incrementally like // with aggressive buffering, cleaning incrementally like
// this keeps the intermediate objects from hitting the disk. // this keeps the intermediate objects from hitting the disk.
if !buildWork { if !buildWork {
@ -1798,7 +1798,7 @@ func (b *builder) copyFile(a *action, dst, src string, perm os.FileMode, force b
df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil && toolIsWindows { if err != nil && toolIsWindows {
// Windows does not allow deletion of a binary file // Windows does not allow deletion of a binary file
// while it is executing. Try to move it out of the way. // while it is executing. Try to move it out of the way.
// If the move fails, which is likely, we'll try again the // If the move fails, which is likely, we'll try again the
// next time we do an install of this binary. // next time we do an install of this binary.
if err := os.Rename(dst, dst+"~"); err == nil { if err := os.Rename(dst, dst+"~"); err == nil {
@ -1928,7 +1928,7 @@ func (b *builder) showcmd(dir string, format string, args ...interface{}) {
// The output is expected to contain references to 'dir', usually // The output is expected to contain references to 'dir', usually
// the source directory for the package that has failed to build. // the source directory for the package that has failed to build.
// showOutput rewrites mentions of dir with a relative path to dir // showOutput rewrites mentions of dir with a relative path to dir
// when the relative path is shorter. This is usually more pleasant. // when the relative path is shorter. This is usually more pleasant.
// For example, if fmt doesn't compile and we are in src/html, // For example, if fmt doesn't compile and we are in src/html,
// the output is // the output is
// //
@ -1986,7 +1986,7 @@ func relPaths(paths []string) []string {
// errPrintedOutput is a special error indicating that a command failed // errPrintedOutput is a special error indicating that a command failed
// but that it generated output as well, and that output has already // but that it generated output as well, and that output has already
// been printed, so there's no point showing 'exit status 1' or whatever // been printed, so there's no point showing 'exit status 1' or whatever
// the wait status was. The main executor, builder.do, knows not to // the wait status was. The main executor, builder.do, knows not to
// print this error. // print this error.
var errPrintedOutput = errors.New("already printed output - no need to show error") var errPrintedOutput = errors.New("already printed output - no need to show error")
@ -2055,7 +2055,7 @@ func (b *builder) runOut(dir string, desc string, env []string, cmdargs ...inter
err := cmd.Run() err := cmd.Run()
// cmd.Run will fail on Unix if some other process has the binary // cmd.Run will fail on Unix if some other process has the binary
// we want to run open for writing. This can happen here because // we want to run open for writing. This can happen here because
// we build and install the cgo command and then run it. // we build and install the cgo command and then run it.
// If another command was kicked off while we were writing the // If another command was kicked off while we were writing the
// cgo binary, the child process for that command may be holding // cgo binary, the child process for that command may be holding
@ -2067,27 +2067,27 @@ func (b *builder) runOut(dir string, desc string, env []string, cmdargs ...inter
// The answer is that running a command is fork and exec. // The answer is that running a command is fork and exec.
// A child forked while the cgo fd is open inherits that fd. // A child forked while the cgo fd is open inherits that fd.
// Until the child has called exec, it holds the fd open and the // Until the child has called exec, it holds the fd open and the
// kernel will not let us run cgo. Even if the child were to close // kernel will not let us run cgo. Even if the child were to close
// the fd explicitly, it would still be open from the time of the fork // the fd explicitly, it would still be open from the time of the fork
// until the time of the explicit close, and the race would remain. // until the time of the explicit close, and the race would remain.
// //
// On Unix systems, this results in ETXTBSY, which formats // On Unix systems, this results in ETXTBSY, which formats
// as "text file busy". Rather than hard-code specific error cases, // as "text file busy". Rather than hard-code specific error cases,
// we just look for that string. If this happens, sleep a little // we just look for that string. If this happens, sleep a little
// and try again. We let this happen three times, with increasing // and try again. We let this happen three times, with increasing
// sleep lengths: 100+200+400 ms = 0.7 seconds. // sleep lengths: 100+200+400 ms = 0.7 seconds.
// //
// An alternate solution might be to split the cmd.Run into // An alternate solution might be to split the cmd.Run into
// separate cmd.Start and cmd.Wait, and then use an RWLock // separate cmd.Start and cmd.Wait, and then use an RWLock
// to make sure that copyFile only executes when no cmd.Start // to make sure that copyFile only executes when no cmd.Start
// call is in progress. However, cmd.Start (really syscall.forkExec) // call is in progress. However, cmd.Start (really syscall.forkExec)
// only guarantees that when it returns, the exec is committed to // only guarantees that when it returns, the exec is committed to
// happen and succeed. It uses a close-on-exec file descriptor // happen and succeed. It uses a close-on-exec file descriptor
// itself to determine this, so we know that when cmd.Start returns, // itself to determine this, so we know that when cmd.Start returns,
// at least one close-on-exec file descriptor has been closed. // at least one close-on-exec file descriptor has been closed.
// However, we cannot be sure that all of them have been closed, // However, we cannot be sure that all of them have been closed,
// so the program might still encounter ETXTBSY even with such // so the program might still encounter ETXTBSY even with such
// an RWLock. The race window would be smaller, perhaps, but not // an RWLock. The race window would be smaller, perhaps, but not
// guaranteed to be gone. // guaranteed to be gone.
// //
// Sleeping when we observe the race seems to be the most reliable // Sleeping when we observe the race seems to be the most reliable
@ -2137,7 +2137,7 @@ func (b *builder) mkdir(dir string) error {
b.exec.Lock() b.exec.Lock()
defer b.exec.Unlock() defer b.exec.Unlock()
// We can be a little aggressive about being // We can be a little aggressive about being
// sure directories exist. Skip repeated calls. // sure directories exist. Skip repeated calls.
if b.mkdirCache[dir] { if b.mkdirCache[dir] {
return nil return nil
} }
@ -2745,7 +2745,7 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
// initialization code. // initialization code.
// //
// The user remains responsible for linking against // The user remains responsible for linking against
// -lgo -lpthread -lm in the final link. We can't use // -lgo -lpthread -lm in the final link. We can't use
// -r to pick them up because we can't combine // -r to pick them up because we can't combine
// split-stack and non-split-stack code in a single -r // split-stack and non-split-stack code in a single -r
// link, and libgo picks up non-split-stack code from // link, and libgo picks up non-split-stack code from
@ -3183,7 +3183,7 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
case strings.HasPrefix(f, "-fsanitize="): case strings.HasPrefix(f, "-fsanitize="):
continue continue
// runpath flags not applicable unless building a shared // runpath flags not applicable unless building a shared
// object or executable; see issue 12115 for details. This // object or executable; see issue 12115 for details. This
// is necessary as Go currently does not offer a way to // is necessary as Go currently does not offer a way to
// specify the set of LDFLAGS that only apply to shared // specify the set of LDFLAGS that only apply to shared
// objects. // objects.
@ -3534,12 +3534,12 @@ func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx b
// disableBuildID adjusts a linker command line to avoid creating a // disableBuildID adjusts a linker command line to avoid creating a
// build ID when creating an object file rather than an executable or // build ID when creating an object file rather than an executable or
// shared library. Some systems, such as Ubuntu, always add // shared library. Some systems, such as Ubuntu, always add
// --build-id to every link, but we don't want a build ID when we are // --build-id to every link, but we don't want a build ID when we are
// producing an object file. On some of those system a plain -r (not // producing an object file. On some of those system a plain -r (not
// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a // -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a
// plain -r. I don't know how to turn off --build-id when using clang // plain -r. I don't know how to turn off --build-id when using clang
// other than passing a trailing --build-id=none. So that is what we // other than passing a trailing --build-id=none. So that is what we
// do, but only on systems likely to support it, which is to say, // do, but only on systems likely to support it, which is to say,
// systems that normally use gold or the GNU linker. // systems that normally use gold or the GNU linker.
func (b *builder) disableBuildID(ldflags []string) []string { func (b *builder) disableBuildID(ldflags []string) []string {

View File

@ -112,7 +112,7 @@ func runGet(cmd *Command, args []string) {
// Code we downloaded and all code that depends on it // Code we downloaded and all code that depends on it
// needs to be evicted from the package cache so that // needs to be evicted from the package cache so that
// the information will be recomputed. Instead of keeping // the information will be recomputed. Instead of keeping
// track of the reverse dependency information, evict // track of the reverse dependency information, evict
// everything. // everything.
for name := range packageCache { for name := range packageCache {
@ -142,7 +142,7 @@ func runGet(cmd *Command, args []string) {
} }
// downloadPaths prepares the list of paths to pass to download. // downloadPaths prepares the list of paths to pass to download.
// It expands ... patterns that can be expanded. If there is no match // It expands ... patterns that can be expanded. If there is no match
// for a particular pattern, downloadPaths leaves it in the result list, // for a particular pattern, downloadPaths leaves it in the result list,
// in the hope that we can figure out the repository from the // in the hope that we can figure out the repository from the
// initial ...-free prefix. // initial ...-free prefix.
@ -153,7 +153,7 @@ func downloadPaths(args []string) []string {
if strings.Contains(a, "...") { if strings.Contains(a, "...") {
var expand []string var expand []string
// Use matchPackagesInFS to avoid printing // Use matchPackagesInFS to avoid printing
// warnings. They will be printed by the // warnings. They will be printed by the
// eventual call to importPaths instead. // eventual call to importPaths instead.
if build.IsLocalImport(a) { if build.IsLocalImport(a) {
expand = matchPackagesInFS(a) expand = matchPackagesInFS(a)
@ -237,7 +237,7 @@ func download(arg string, parent *Package, stk *importStack, mode int) {
return return
} }
// Warn that code.google.com is shutting down. We // Warn that code.google.com is shutting down. We
// issue the warning here because this is where we // issue the warning here because this is where we
// have the import stack. // have the import stack.
if strings.HasPrefix(p.ImportPath, "code.google.com") { if strings.HasPrefix(p.ImportPath, "code.google.com") {
@ -355,7 +355,7 @@ func downloadPackage(p *Package) error {
} }
if p.build.SrcRoot != "" { if p.build.SrcRoot != "" {
// Directory exists. Look for checkout along path to src. // Directory exists. Look for checkout along path to src.
vcs, rootPath, err = vcsForDir(p) vcs, rootPath, err = vcsForDir(p)
if err != nil { if err != nil {
return err return err
@ -399,7 +399,7 @@ func downloadPackage(p *Package) error {
} }
if p.build.SrcRoot == "" { if p.build.SrcRoot == "" {
// Package not found. Put in first directory of $GOPATH. // Package not found. Put in first directory of $GOPATH.
list := filepath.SplitList(buildContext.GOPATH) list := filepath.SplitList(buildContext.GOPATH)
if len(list) == 0 { if len(list) == 0 {
return fmt.Errorf("cannot download, $GOPATH not set. For more details see: go help gopath") return fmt.Errorf("cannot download, $GOPATH not set. For more details see: go help gopath")
@ -430,7 +430,7 @@ func downloadPackage(p *Package) error {
return fmt.Errorf("%s exists but is not a directory", meta) return fmt.Errorf("%s exists but is not a directory", meta)
} }
if err != nil { if err != nil {
// Metadata directory does not exist. Prepare to checkout new copy. // Metadata directory does not exist. Prepare to checkout new copy.
// Some version control tools require the target directory not to exist. // Some version control tools require the target directory not to exist.
// We require that too, just to avoid stepping on existing work. // We require that too, just to avoid stepping on existing work.
if _, err := os.Stat(root); err == nil { if _, err := os.Stat(root); err == nil {

View File

@ -109,7 +109,7 @@ func TestMain(m *testing.M) {
os.Exit(r) os.Exit(r)
} }
// The length of an mtime tick on this system. This is an estimate of // The length of an mtime tick on this system. This is an estimate of
// how long we need to sleep to ensure that the mtime of two files is // how long we need to sleep to ensure that the mtime of two files is
// different. // different.
// We used to try to be clever but that didn't always work (see golang.org/issue/12205). // We used to try to be clever but that didn't always work (see golang.org/issue/12205).
@ -181,7 +181,7 @@ func (tg *testgoData) pwd() string {
return wd return wd
} }
// cd changes the current directory to the named directory. Note that // cd changes the current directory to the named directory. Note that
// using this means that the test must not be run in parallel with any // using this means that the test must not be run in parallel with any
// other tests. // other tests.
func (tg *testgoData) cd(dir string) { func (tg *testgoData) cd(dir string) {
@ -325,7 +325,7 @@ func (tg *testgoData) getStderr() string {
} }
// doGrepMatch looks for a regular expression in a buffer, and returns // doGrepMatch looks for a regular expression in a buffer, and returns
// whether it is found. The regular expression is matched against // whether it is found. The regular expression is matched against
// each line separately, as with the grep command. // each line separately, as with the grep command.
func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool { func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool {
if !tg.ran { if !tg.ran {
@ -341,7 +341,7 @@ func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool {
} }
// doGrep looks for a regular expression in a buffer and fails if it // doGrep looks for a regular expression in a buffer and fails if it
// is not found. The name argument is the name of the output we are // is not found. The name argument is the name of the output we are
// searching, "output" or "error". The msg argument is logged on // searching, "output" or "error". The msg argument is logged on
// failure. // failure.
func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) { func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) {
@ -375,7 +375,7 @@ func (tg *testgoData) grepBoth(match, msg string) {
} }
// doGrepNot looks for a regular expression in a buffer and fails if // doGrepNot looks for a regular expression in a buffer and fails if
// it is found. The name and msg arguments are as for doGrep. // it is found. The name and msg arguments are as for doGrep.
func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) { func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) {
if tg.doGrepMatch(match, b) { if tg.doGrepMatch(match, b) {
tg.t.Log(msg) tg.t.Log(msg)
@ -440,8 +440,8 @@ func (tg *testgoData) grepCountBoth(match string) int {
} }
// creatingTemp records that the test plans to create a temporary file // creatingTemp records that the test plans to create a temporary file
// or directory. If the file or directory exists already, it will be // or directory. If the file or directory exists already, it will be
// removed. When the test completes, the file or directory will be // removed. When the test completes, the file or directory will be
// removed if it exists. // removed if it exists.
func (tg *testgoData) creatingTemp(path string) { func (tg *testgoData) creatingTemp(path string) {
if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) { if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) {
@ -457,7 +457,7 @@ func (tg *testgoData) creatingTemp(path string) {
tg.temps = append(tg.temps, path) tg.temps = append(tg.temps, path)
} }
// makeTempdir makes a temporary directory for a run of testgo. If // makeTempdir makes a temporary directory for a run of testgo. If
// the temporary directory was already created, this does nothing. // the temporary directory was already created, this does nothing.
func (tg *testgoData) makeTempdir() { func (tg *testgoData) makeTempdir() {
if tg.tempdir == "" { if tg.tempdir == "" {
@ -1105,8 +1105,8 @@ func testMove(t *testing.T, vcs, url, base, config string) {
} }
if vcs == "git" { if vcs == "git" {
// git will ask for a username and password when we // git will ask for a username and password when we
// run go get -d -f -u. An empty username and // run go get -d -f -u. An empty username and
// password will work. Prevent asking by setting // password will work. Prevent asking by setting
// GIT_ASKPASS. // GIT_ASKPASS.
tg.creatingTemp("sink" + exeSuffix) tg.creatingTemp("sink" + exeSuffix)
tg.tempFile("src/sink/sink.go", `package main; func main() {}`) tg.tempFile("src/sink/sink.go", `package main; func main() {}`)

View File

@ -339,7 +339,7 @@ func importPathsNoDotExpansion(args []string) []string {
for _, a := range args { for _, a := range args {
// Arguments are supposed to be import paths, but // Arguments are supposed to be import paths, but
// as a courtesy to Windows developers, rewrite \ to / // as a courtesy to Windows developers, rewrite \ to /
// in command-line arguments. Handles .\... and so on. // in command-line arguments. Handles .\... and so on.
if filepath.Separator == '\\' { if filepath.Separator == '\\' {
a = strings.Replace(a, `\`, `/`, -1) a = strings.Replace(a, `\`, `/`, -1)
} }
@ -472,7 +472,7 @@ NextVar:
} }
// matchPattern(pattern)(name) reports whether // matchPattern(pattern)(name) reports whether
// name matches pattern. Pattern is a limited glob // name matches pattern. Pattern is a limited glob
// pattern in which '...' means 'any string' and there // pattern in which '...' means 'any string' and there
// is no other special syntax. // is no other special syntax.
func matchPattern(pattern string) func(name string) bool { func matchPattern(pattern string) func(name string) bool {
@ -629,7 +629,7 @@ func matchPackages(pattern string) []string {
// allPackagesInFS is like allPackages but is passed a pattern // allPackagesInFS is like allPackages but is passed a pattern
// beginning ./ or ../, meaning it should scan the tree rooted // beginning ./ or ../, meaning it should scan the tree rooted
// at the given directory. There are ... in the pattern too. // at the given directory. There are ... in the pattern too.
func allPackagesInFS(pattern string) []string { func allPackagesInFS(pattern string) []string {
pkgs := matchPackagesInFS(pattern) pkgs := matchPackagesInFS(pattern)
if len(pkgs) == 0 { if len(pkgs) == 0 {

View File

@ -71,7 +71,7 @@ func readELFNote(filename, name string, typ int32) ([]byte, error) {
var elfGoNote = []byte("Go\x00\x00") var elfGoNote = []byte("Go\x00\x00")
// The Go build ID is stored in a note described by an ELF PT_NOTE prog // The Go build ID is stored in a note described by an ELF PT_NOTE prog
// header. The caller has already opened filename, to get f, and read // header. The caller has already opened filename, to get f, and read
// at least 4 kB out, in data. // at least 4 kB out, in data.
func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) { func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) {
// Assume the note content is in the data, already read. // Assume the note content is in the data, already read.

View File

@ -27,8 +27,8 @@ import (
// A Package describes a single package found in a directory. // A Package describes a single package found in a directory.
type Package struct { type Package struct {
// Note: These fields are part of the go command's public API. // Note: These fields are part of the go command's public API.
// See list.go. It is okay to add fields, but not to change or // See list.go. It is okay to add fields, but not to change or
// remove existing ones. Keep in sync with list.go // remove existing ones. Keep in sync with list.go
Dir string `json:",omitempty"` // directory containing package sources Dir string `json:",omitempty"` // directory containing package sources
ImportPath string `json:",omitempty"` // import path of package in dir ImportPath string `json:",omitempty"` // import path of package in dir
ImportComment string `json:",omitempty"` // path in import comment on package statement ImportComment string `json:",omitempty"` // path in import comment on package statement
@ -208,7 +208,7 @@ func (p *PackageError) Error() string {
return fmt.Sprintf("%s\npackage %s\n", p.Err, strings.Join(p.ImportStack, "\n\timports ")) return fmt.Sprintf("%s\npackage %s\n", p.Err, strings.Join(p.ImportStack, "\n\timports "))
} }
if p.Pos != "" { if p.Pos != "" {
// Omit import stack. The full path to the file where the error // Omit import stack. The full path to the file where the error
// is the most important thing. // is the most important thing.
return p.Pos + ": " + p.Err return p.Pos + ": " + p.Err
} }
@ -267,8 +267,8 @@ func reloadPackage(arg string, stk *importStack) *Package {
} }
// dirToImportPath returns the pseudo-import path we use for a package // dirToImportPath returns the pseudo-import path we use for a package
// outside the Go path. It begins with _/ and then contains the full path // outside the Go path. It begins with _/ and then contains the full path
// to the directory. If the package lives in c:\home\gopher\my\pkg then // to the directory. If the package lives in c:\home\gopher\my\pkg then
// the pseudo-import path is _/c_/home/gopher/my/pkg. // the pseudo-import path is _/c_/home/gopher/my/pkg.
// Using a pseudo-import path like this makes the ./ imports no longer // Using a pseudo-import path like this makes the ./ imports no longer
// a special case, so that all the code to deal with ordinary imports works // a special case, so that all the code to deal with ordinary imports works
@ -472,7 +472,7 @@ func hasGoFiles(dir string) bool {
} }
// reusePackage reuses package p to satisfy the import at the top // reusePackage reuses package p to satisfy the import at the top
// of the import stack stk. If this use causes an import loop, // of the import stack stk. If this use causes an import loop,
// reusePackage updates p's error information to record the loop. // reusePackage updates p's error information to record the loop.
func reusePackage(p *Package, stk *importStack) *Package { func reusePackage(p *Package, stk *importStack) *Package {
// We use p.imports==nil to detect a package that // We use p.imports==nil to detect a package that
@ -715,7 +715,7 @@ func expandScanner(err error) error {
// Prepare error with \n before each message. // Prepare error with \n before each message.
// When printed in something like context: %v // When printed in something like context: %v
// this will put the leading file positions each on // this will put the leading file positions each on
// its own line. It will also show all the errors // its own line. It will also show all the errors
// instead of just the first, as err.Error does. // instead of just the first, as err.Error does.
var buf bytes.Buffer var buf bytes.Buffer
for _, e := range err { for _, e := range err {
@ -1356,8 +1356,8 @@ func isStale(p *Package) bool {
} }
// A package without Go sources means we only found // A package without Go sources means we only found
// the installed .a file. Since we don't know how to rebuild // the installed .a file. Since we don't know how to rebuild
// it, it can't be stale, even if -a is set. This enables binary-only // it, it can't be stale, even if -a is set. This enables binary-only
// distributions of Go packages, although such binaries are // distributions of Go packages, although such binaries are
// only useful with the specific version of the toolchain that // only useful with the specific version of the toolchain that
// created them. // created them.
@ -1442,7 +1442,7 @@ func isStale(p *Package) bool {
// As a courtesy to developers installing new versions of the compiler // As a courtesy to developers installing new versions of the compiler
// frequently, define that packages are stale if they are // frequently, define that packages are stale if they are
// older than the compiler, and commands if they are older than // older than the compiler, and commands if they are older than
// the linker. This heuristic will not work if the binaries are // the linker. This heuristic will not work if the binaries are
// back-dated, as some binary distributions may do, but it does handle // back-dated, as some binary distributions may do, but it does handle
// a very common case. // a very common case.
// See issue 3036. // See issue 3036.
@ -1564,7 +1564,7 @@ var cwd, _ = os.Getwd()
var cmdCache = map[string]*Package{} var cmdCache = map[string]*Package{}
// loadPackage is like loadImport but is used for command-line arguments, // loadPackage is like loadImport but is used for command-line arguments,
// not for paths found in import statements. In addition to ordinary import paths, // not for paths found in import statements. In addition to ordinary import paths,
// loadPackage accepts pseudo-paths beginning with cmd/ to denote commands // loadPackage accepts pseudo-paths beginning with cmd/ to denote commands
// in the Go command directory, as well as paths to those directories. // in the Go command directory, as well as paths to those directories.
func loadPackage(arg string, stk *importStack) *Package { func loadPackage(arg string, stk *importStack) *Package {
@ -1628,7 +1628,7 @@ func loadPackage(arg string, stk *importStack) *Package {
// command line arguments 'args'. If a named package // command line arguments 'args'. If a named package
// cannot be loaded at all (for example, if the directory does not exist), // cannot be loaded at all (for example, if the directory does not exist),
// then packages prints an error and does not include that // then packages prints an error and does not include that
// package in the results. However, if errors occur trying // package in the results. However, if errors occur trying
// to load dependencies of a named package, the named // to load dependencies of a named package, the named
// package is still returned, with p.Incomplete = true // package is still returned, with p.Incomplete = true
// and details in p.DepsErrors. // and details in p.DepsErrors.

View File

@ -128,7 +128,7 @@ func runRun(cmd *Command, args []string) {
} }
// runProgram is the action for running a binary that has already // runProgram is the action for running a binary that has already
// been compiled. We ignore exit status. // been compiled. We ignore exit status.
func (b *builder) runProgram(a *action) error { func (b *builder) runProgram(a *action) error {
cmdline := stringList(findExecCmd(), a.deps[0].target, a.args) cmdline := stringList(findExecCmd(), a.deps[0].target, a.args)
if buildN || buildX { if buildN || buildX {

View File

@ -388,7 +388,7 @@ func runTest(cmd *Command, args []string) {
} }
// If a test timeout was given and is parseable, set our kill timeout // If a test timeout was given and is parseable, set our kill timeout
// to that timeout plus one minute. This is a backup alarm in case // to that timeout plus one minute. This is a backup alarm in case
// the test wedges with a goroutine spinning and its background // the test wedges with a goroutine spinning and its background
// timer does not get a chance to fire. // timer does not get a chance to fire.
if dt, err := time.ParseDuration(testTimeout); err == nil && dt > 0 { if dt, err := time.ParseDuration(testTimeout); err == nil && dt > 0 {
@ -691,7 +691,7 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action,
// the usual place in the temporary tree, because then // the usual place in the temporary tree, because then
// other tests will see it as the real package. // other tests will see it as the real package.
// Instead we make a _test directory under the import path // Instead we make a _test directory under the import path
// and then repeat the import path there. We tell the // and then repeat the import path there. We tell the
// compiler and linker to look in that _test directory first. // compiler and linker to look in that _test directory first.
// //
// That is, if the package under test is unicode/utf8, // That is, if the package under test is unicode/utf8,

View File

@ -93,7 +93,7 @@ var vcsHg = &vcsCmd{
downloadCmd: []string{"pull"}, downloadCmd: []string{"pull"},
// We allow both tag and branch names as 'tags' // We allow both tag and branch names as 'tags'
// for selecting a version. This lets people have // for selecting a version. This lets people have
// a go.release.r60 branch and a go1 branch // a go.release.r60 branch and a go1 branch
// and make changes in both, without constantly // and make changes in both, without constantly
// editing .hgtags. // editing .hgtags.

View File

@ -28,9 +28,9 @@ func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) (
) { ) {
// Try as whole source file. // Try as whole source file.
file, err = parser.ParseFile(fset, filename, src, parserMode) file, err = parser.ParseFile(fset, filename, src, parserMode)
// If there's no error, return. If the error is that the source file didn't begin with a // If there's no error, return. If the error is that the source file didn't begin with a
// package line and source fragments are ok, fall through to // package line and source fragments are ok, fall through to
// try as a source fragment. Stop and return on any other error. // try as a source fragment. Stop and return on any other error.
if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") {
return return
} }
@ -59,7 +59,7 @@ func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) (
// If this is a statement list, make it a source file // If this is a statement list, make it a source file
// by inserting a package clause and turning the list // by inserting a package clause and turning the list
// into a function body. This handles expressions too. // into a function body. This handles expressions too.
// Insert using a ;, not a newline, so that the line numbers // Insert using a ;, not a newline, so that the line numbers
// in fsrc match the ones in src. Add an extra '\n' before the '}' // in fsrc match the ones in src. Add an extra '\n' before the '}'
// to make sure comments are flushed before the '}'. // to make sure comments are flushed before the '}'.

View File

@ -158,7 +158,7 @@ func isWildcard(s string) bool {
// recording wildcard submatches in m. // recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val. // If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
// Wildcard matches any expression. If it appears multiple // Wildcard matches any expression. If it appears multiple
// times in the pattern, it must match the same expression // times in the pattern, it must match the same expression
// each time. // each time.
if m != nil && pattern.IsValid() && pattern.Type() == identType { if m != nil && pattern.IsValid() && pattern.Type() == identType {

Some files were not shown because too many files have changed in this diff Show More