diff --git a/AUTHORS b/AUTHORS
index 96704bd564..e2c8150ab0 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1435,6 +1435,7 @@ Wei Guangjing
Weichao Tang
Weixie Cui <523516579@qq.com>
Wembley G. Leach, Jr
+Wen Yang
Will Faught
Will Storey
Willem van der Schyff
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index f79c4132b8..ea8b1b964e 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -2746,6 +2746,7 @@ Weichao Tang
Weilu Jia
Weixie Cui <523516579@qq.com>
Wembley G. Leach, Jr
+Wen Yang
Wenlei (Frank) He
Wenzel Lowe
Wil Selwood
diff --git a/api/next/35044.txt b/api/next/35044.txt
new file mode 100644
index 0000000000..0ed6f2e4d0
--- /dev/null
+++ b/api/next/35044.txt
@@ -0,0 +1 @@
+pkg crypto/x509, method (*CertPool) Clone() *CertPool #35044
\ No newline at end of file
diff --git a/api/next/42710.txt b/api/next/42710.txt
new file mode 100644
index 0000000000..7879758d16
--- /dev/null
+++ b/api/next/42710.txt
@@ -0,0 +1,2 @@
+pkg hash/maphash, func Bytes(Seed, []uint8) uint64 #42710
+pkg hash/maphash, func String(Seed, string) uint64 #42710
diff --git a/api/next/50340.txt b/api/next/50340.txt
new file mode 100644
index 0000000000..211392cd25
--- /dev/null
+++ b/api/next/50340.txt
@@ -0,0 +1 @@
+pkg sort, func Find(int, func(int) int) (int, bool) #50340
diff --git a/api/next/50674.txt b/api/next/50674.txt
new file mode 100644
index 0000000000..6b5bca3a9d
--- /dev/null
+++ b/api/next/50674.txt
@@ -0,0 +1,9 @@
+pkg crypto/x509, func ParseRevocationList([]uint8) (*RevocationList, error) #50674
+pkg crypto/x509, method (*RevocationList) CheckSignatureFrom(*Certificate) error #50674
+pkg crypto/x509, type RevocationList struct, AuthorityKeyId []uint8 #50674
+pkg crypto/x509, type RevocationList struct, Extensions []pkix.Extension #50674
+pkg crypto/x509, type RevocationList struct, Issuer pkix.Name #50674
+pkg crypto/x509, type RevocationList struct, Raw []uint8 #50674
+pkg crypto/x509, type RevocationList struct, RawIssuer []uint8 #50674
+pkg crypto/x509, type RevocationList struct, RawTBSRevocationList []uint8 #50674
+pkg crypto/x509, type RevocationList struct, Signature []uint8 #50674
diff --git a/api/next/51082.txt b/api/next/51082.txt
new file mode 100644
index 0000000000..b05997f985
--- /dev/null
+++ b/api/next/51082.txt
@@ -0,0 +1,61 @@
+pkg go/doc, method (*Package) HTML(string) []uint8 #51082
+pkg go/doc, method (*Package) Markdown(string) []uint8 #51082
+pkg go/doc, method (*Package) Parser() *comment.Parser #51082
+pkg go/doc, method (*Package) Printer() *comment.Printer #51082
+pkg go/doc, method (*Package) Synopsis(string) string #51082
+pkg go/doc, method (*Package) Text(string) []uint8 #51082
+pkg go/doc/comment, func DefaultLookupPackage(string) (string, bool) #51082
+pkg go/doc/comment, method (*DocLink) DefaultURL(string) string #51082
+pkg go/doc/comment, method (*Heading) DefaultID() string #51082
+pkg go/doc/comment, method (*List) BlankBefore() bool #51082
+pkg go/doc/comment, method (*List) BlankBetween() bool #51082
+pkg go/doc/comment, method (*Parser) Parse(string) *Doc #51082
+pkg go/doc/comment, method (*Printer) Comment(*Doc) []uint8 #51082
+pkg go/doc/comment, method (*Printer) HTML(*Doc) []uint8 #51082
+pkg go/doc/comment, method (*Printer) Markdown(*Doc) []uint8 #51082
+pkg go/doc/comment, method (*Printer) Text(*Doc) []uint8 #51082
+pkg go/doc/comment, type Block interface, unexported methods #51082
+pkg go/doc/comment, type Code struct #51082
+pkg go/doc/comment, type Code struct, Text string #51082
+pkg go/doc/comment, type Doc struct #51082
+pkg go/doc/comment, type Doc struct, Content []Block #51082
+pkg go/doc/comment, type Doc struct, Links []*LinkDef #51082
+pkg go/doc/comment, type DocLink struct #51082
+pkg go/doc/comment, type DocLink struct, ImportPath string #51082
+pkg go/doc/comment, type DocLink struct, Name string #51082
+pkg go/doc/comment, type DocLink struct, Recv string #51082
+pkg go/doc/comment, type DocLink struct, Text []Text #51082
+pkg go/doc/comment, type Heading struct #51082
+pkg go/doc/comment, type Heading struct, Text []Text #51082
+pkg go/doc/comment, type Italic string #51082
+pkg go/doc/comment, type Link struct #51082
+pkg go/doc/comment, type Link struct, Auto bool #51082
+pkg go/doc/comment, type Link struct, Text []Text #51082
+pkg go/doc/comment, type Link struct, URL string #51082
+pkg go/doc/comment, type LinkDef struct #51082
+pkg go/doc/comment, type LinkDef struct, Text string #51082
+pkg go/doc/comment, type LinkDef struct, URL string #51082
+pkg go/doc/comment, type LinkDef struct, Used bool #51082
+pkg go/doc/comment, type List struct #51082
+pkg go/doc/comment, type List struct, ForceBlankBefore bool #51082
+pkg go/doc/comment, type List struct, ForceBlankBetween bool #51082
+pkg go/doc/comment, type List struct, Items []*ListItem #51082
+pkg go/doc/comment, type ListItem struct #51082
+pkg go/doc/comment, type ListItem struct, Content []Block #51082
+pkg go/doc/comment, type ListItem struct, Number string #51082
+pkg go/doc/comment, type Paragraph struct #51082
+pkg go/doc/comment, type Paragraph struct, Text []Text #51082
+pkg go/doc/comment, type Parser struct #51082
+pkg go/doc/comment, type Parser struct, LookupPackage func(string) (string, bool) #51082
+pkg go/doc/comment, type Parser struct, LookupSym func(string, string) bool #51082
+pkg go/doc/comment, type Parser struct, Words map[string]string #51082
+pkg go/doc/comment, type Plain string #51082
+pkg go/doc/comment, type Printer struct #51082
+pkg go/doc/comment, type Printer struct, DocLinkBaseURL string #51082
+pkg go/doc/comment, type Printer struct, DocLinkURL func(*DocLink) string #51082
+pkg go/doc/comment, type Printer struct, HeadingID func(*Heading) string #51082
+pkg go/doc/comment, type Printer struct, HeadingLevel int #51082
+pkg go/doc/comment, type Printer struct, TextCodePrefix string #51082
+pkg go/doc/comment, type Printer struct, TextPrefix string #51082
+pkg go/doc/comment, type Printer struct, TextWidth int #51082
+pkg go/doc/comment, type Text interface, unexported methods #51082
diff --git a/api/next/51644.txt b/api/next/51644.txt
new file mode 100644
index 0000000000..d93dbbf184
--- /dev/null
+++ b/api/next/51644.txt
@@ -0,0 +1,2 @@
+pkg encoding/binary, func AppendUvarint([]uint8, uint64) []uint8 #51644
+pkg encoding/binary, func AppendVarint([]uint8, int64) []uint8 #51644
diff --git a/doc/go1.19.html b/doc/go1.19.html
index 857d8ed8ce..a813d59cb8 100644
--- a/doc/go1.19.html
+++ b/doc/go1.19.html
@@ -32,7 +32,22 @@ Do not send CLs removing the interior tags from such phrases.
Go command
- TODO: complete this section, or delete if not needed
+ TODO: complete this section.
+
+
+
+
+ The -trimpath flag, if set, is now included in the build settings
+ stamped into Go binaries by gobuild, and can be
+ examined using
+ goversion-m
+ or debug.ReadBuildInfo.
+
+
+ gogenerate now sets the GOROOT
+ environment variable explicitly in the generator's environment, so that
+ generators can locate the correct GOROOT even if built
+ with -trimpath.
New unix build constraint
@@ -76,6 +91,19 @@ Do not send CLs removing the interior tags from such phrases.
+ Draw with the Src operator preserves
+ non-premultiplied-alpha colors when destination and source images are
+ both *image.NRGBA (or both *image.NRGBA64).
+ This reverts a behavior change accidentally introduced by a Go 1.18
+ library optimization, to match the behavior in Go 1.17 and earlier.
+
@@ -93,9 +121,9 @@ Do not send CLs removing the interior tags from such phrases.
When a net package function or method returns an "I/O timeout"
error, the error will now satisfy errors.Is(err,
- context.Canceled). When a net package function returns
- an "operation was canceled" error, the error will now satisfy
- errors.Is(err, context.DeadlineExceeded).
+ context.DeadlineExceeded). When a net package function
+ returns an "operation was canceled" error, the error will now
+ satisfy errors.Is(err, context.Canceled).
These changes are intended to make it easier for code to test
for cases in which a context cancelation or timeout causes a net
package function or method to return an error, while preserving
@@ -104,6 +132,17 @@ Do not send CLs removing the interior tags from such phrases.
+ The GOROOT function now returns the empty string
+ (instead of "go") when the binary was built with
+ the -trimpath flag set and the GOROOT
+ variable is not set in the process environment.
+
diff --git a/misc/cgo/gmp/gmp.go b/misc/cgo/gmp/gmp.go
index 971a10aaac..0835fdc8de 100644
--- a/misc/cgo/gmp/gmp.go
+++ b/misc/cgo/gmp/gmp.go
@@ -333,10 +333,9 @@ func (z *Int) Abs(x *Int) *Int {
// CmpInt compares x and y. The result is
//
-// -1 if x < y
-// 0 if x == y
-// +1 if x > y
-//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
func CmpInt(x, y *Int) int {
x.doinit()
y.doinit()
diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go
index c9e9e5fe63..e4898778be 100644
--- a/misc/cgo/testcshared/cshared_test.go
+++ b/misc/cgo/testcshared/cshared_test.go
@@ -5,6 +5,7 @@
package cshared_test
import (
+ "bufio"
"bytes"
"debug/elf"
"debug/pe"
@@ -838,3 +839,51 @@ func TestGo2C2Go(t *testing.T) {
run(t, goenv, "go", "build", "-o", bin, "./go2c2go/m2")
runExe(t, runenv, bin)
}
+
+func TestIssue36233(t *testing.T) {
+ t.Parallel()
+
+ // Test that the export header uses GoComplex64 and GoComplex128
+ // for complex types.
+
+ tmpdir, err := os.MkdirTemp("", "cshared-TestIssue36233")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ const exportHeader = "issue36233.h"
+
+ run(t, nil, "go", "tool", "cgo", "-exportheader", exportHeader, "-objdir", tmpdir, "./issue36233/issue36233.go")
+ data, err := os.ReadFile(exportHeader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ funcs := []struct{ name, signature string }{
+ {"exportComplex64", "GoComplex64 exportComplex64(GoComplex64 v)"},
+ {"exportComplex128", "GoComplex128 exportComplex128(GoComplex128 v)"},
+ {"exportComplexfloat", "GoComplex64 exportComplexfloat(GoComplex64 v)"},
+ {"exportComplexdouble", "GoComplex128 exportComplexdouble(GoComplex128 v)"},
+ }
+
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ var found int
+ for scanner.Scan() {
+ b := scanner.Bytes()
+ for _, fn := range funcs {
+ if bytes.Contains(b, []byte(fn.name)) {
+ found++
+ if !bytes.Contains(b, []byte(fn.signature)) {
+ t.Errorf("function signature mismatch; got %q, want %q", b, fn.signature)
+ }
+ }
+ }
+ }
+ if err = scanner.Err(); err != nil {
+ t.Errorf("scanner encountered error: %v", err)
+ }
+ if found != len(funcs) {
+ t.Error("missing functions")
+ }
+}
diff --git a/misc/cgo/testcshared/testdata/issue36233/issue36233.go b/misc/cgo/testcshared/testdata/issue36233/issue36233.go
new file mode 100644
index 0000000000..d0d1e5d50a
--- /dev/null
+++ b/misc/cgo/testcshared/testdata/issue36233/issue36233.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package main
+
+// #include <complex.h>
+import "C"
+
+//export exportComplex64
+func exportComplex64(v complex64) complex64 {
+ return v
+}
+
+//export exportComplex128
+func exportComplex128(v complex128) complex128 {
+ return v
+}
+
+//export exportComplexfloat
+func exportComplexfloat(v C.complexfloat) C.complexfloat {
+ return v
+}
+
+//export exportComplexdouble
+func exportComplexdouble(v C.complexdouble) C.complexdouble {
+ return v
+}
+
+func main() {}
diff --git a/misc/ios/go_ios_exec.go b/misc/ios/go_ios_exec.go
index 34a734cda7..c275dd339c 100644
--- a/misc/ios/go_ios_exec.go
+++ b/misc/ios/go_ios_exec.go
@@ -13,9 +13,11 @@
// binary.
//
// This script requires that three environment variables be set:
-// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
-// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
-// GOIOS_TEAM_ID: The team id that owns the app id prefix.
+//
+// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
+// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
+// GOIOS_TEAM_ID: The team id that owns the app id prefix.
+//
// $GOROOT/misc/ios contains a script, detect.go, that attempts to autodetect these.
package main
diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go
index c99b5c1920..f6d701d925 100644
--- a/src/archive/tar/common.go
+++ b/src/archive/tar/common.go
@@ -221,9 +221,11 @@ func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
// that the file has no data in it, which is rather odd.
//
// As an example, if the underlying raw file contains the 10-byte data:
+//
// var compactFile = "abcdefgh"
//
// And the sparse map has the following entries:
+//
// var spd sparseDatas = []sparseEntry{
// {Offset: 2, Length: 5}, // Data fragment for 2..6
// {Offset: 18, Length: 3}, // Data fragment for 18..20
@@ -235,6 +237,7 @@ func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
// }
//
// Then the content of the resulting sparse file with a Header.Size of 25 is:
+//
// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type (
sparseDatas []sparseEntry
@@ -293,9 +296,9 @@ func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
// The input must have been already validated.
//
// This function mutates src and returns a normalized map where:
-// * adjacent fragments are coalesced together
-// * only the last fragment may be empty
-// * the endOffset of the last fragment is the total size
+// - adjacent fragments are coalesced together
+// - only the last fragment may be empty
+// - the endOffset of the last fragment is the total size
func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
dst := src[:0]
var pre sparseEntry
diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
index 4b11909bc9..f1b35c34f6 100644
--- a/src/archive/tar/reader.go
+++ b/src/archive/tar/reader.go
@@ -336,9 +336,9 @@ func parsePAX(r io.Reader) (map[string]string, error) {
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
-// * Exactly 0 bytes are read and EOF is hit.
-// * Exactly 1 block of zeros is read and EOF is hit.
-// * At least 2 blocks of zeros are read.
+// - Exactly 0 bytes are read and EOF is hit.
+// - Exactly 1 block of zeros is read and EOF is hit.
+// - At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
// Two blocks of zero bytes marks the end of the archive.
if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
diff --git a/src/archive/tar/strconv.go b/src/archive/tar/strconv.go
index 275db6f026..ac3196370e 100644
--- a/src/archive/tar/strconv.go
+++ b/src/archive/tar/strconv.go
@@ -306,6 +306,7 @@ func formatPAXRecord(k, v string) (string, error) {
// validPAXRecord reports whether the key-value pair is valid where each
// record is formatted as:
+//
// "%d %s=%s\n" % (size, key, value)
//
// Keys and values should be UTF-8, but the number of bad writers out there
diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
index 92fd6f6a92..b4f6a8d714 100644
--- a/src/archive/zip/reader.go
+++ b/src/archive/zip/reader.go
@@ -229,6 +229,9 @@ func (r *checksumReader) Read(b []byte) (n int, err error) {
n, err = r.rc.Read(b)
r.hash.Write(b[:n])
r.nread += uint64(n)
+ if r.nread > r.f.UncompressedSize64 {
+ return 0, ErrFormat
+ }
if err == nil {
return
}
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
index 9bc23642c0..fd0a171304 100644
--- a/src/archive/zip/reader_test.go
+++ b/src/archive/zip/reader_test.go
@@ -1407,3 +1407,30 @@ func TestCVE202141772(t *testing.T) {
t.Errorf("Inconsistent name in info entry: %v", name)
}
}
+
+func TestUnderSize(t *testing.T) {
+ z, err := OpenReader("testdata/readme.zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer z.Close()
+
+ for _, f := range z.File {
+ f.UncompressedSize64 = 1
+ }
+
+ for _, f := range z.File {
+ t.Run(f.Name, func(t *testing.T) {
+ rd, err := f.Open()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rd.Close()
+
+ _, err = io.Copy(io.Discard, rd)
+ if err != ErrFormat {
+ t.Fatalf("Error mismatch\n\tGot: %v\n\tWant: %v", err, ErrFormat)
+ }
+ })
+ }
+}
diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go
index 8997902f8f..e3e4df9de6 100644
--- a/src/builtin/builtin.go
+++ b/src/builtin/builtin.go
@@ -137,9 +137,12 @@ type ComplexType complex64
// new elements. If it does not, a new underlying array will be allocated.
// Append returns the updated slice. It is therefore necessary to store the
// result of append, often in the variable holding the slice itself:
+//
// slice = append(slice, elem1, elem2)
// slice = append(slice, anotherSlice...)
+//
// As a special case, it is legal to append a string to a byte slice, like this:
+//
// slice = append([]byte("hello "), "world"...)
func append(slice []Type, elems ...Type) []Type
@@ -156,24 +159,28 @@ func copy(dst, src []Type) int
func delete(m map[Type]Type1, key Type)
// The len built-in function returns the length of v, according to its type:
+//
// Array: the number of elements in v.
// Pointer to array: the number of elements in *v (even if v is nil).
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
// String: the number of bytes in v.
// Channel: the number of elements queued (unread) in the channel buffer;
// if v is nil, len(v) is zero.
+//
// For some arguments, such as a string literal or a simple array expression, the
// result can be a constant. See the Go language specification's "Length and
// capacity" section for details.
func len(v Type) int
// The cap built-in function returns the capacity of v, according to its type:
+//
// Array: the number of elements in v (same as len(v)).
// Pointer to array: the number of elements in *v (same as len(v)).
// Slice: the maximum length the slice can reach when resliced;
// if v is nil, cap(v) is zero.
// Channel: the channel buffer capacity, in units of elements;
// if v is nil, cap(v) is zero.
+//
// For some arguments, such as a simple array expression, the result can be a
// constant. See the Go language specification's "Length and capacity" section for
// details.
@@ -184,6 +191,7 @@ func cap(v Type) int
// value. Unlike new, make's return type is the same as the type of its
// argument, not a pointer to it. The specification of the result depends on
// the type:
+//
// Slice: The size specifies the length. The capacity of the slice is
// equal to its length. A second integer argument may be provided to
// specify a different capacity; it must be no smaller than the
@@ -225,7 +233,9 @@ func imag(c ComplexType) FloatType
// the last sent value is received. After the last value has been received
// from a closed channel c, any receive from c will succeed without
// blocking, returning the zero value for the channel element. The form
+//
// x, ok := <-c
+//
// will also set ok to false for a closed channel.
func close(c chan<- Type)
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index e3dab4d035..659a82bcc8 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -30,7 +30,7 @@ func Compare(a, b []byte) int {
// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
func explode(s []byte, n int) [][]byte {
- if n <= 0 {
+ if n <= 0 || n > len(s) {
n = len(s)
}
a := make([][]byte, n)
@@ -348,6 +348,9 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
if n < 0 {
n = Count(s, sep) + 1
}
+ if n > len(s)+1 {
+ n = len(s) + 1
+ }
a := make([][]byte, n)
n--
@@ -369,9 +372,10 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
-// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
-// n == 0: the result is nil (zero subslices)
-// n < 0: all subslices
+//
+// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+// n == 0: the result is nil (zero subslices)
+// n < 0: all subslices
//
// To split around the first instance of a separator, see Cut.
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
@@ -380,9 +384,10 @@ func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
-// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
-// n == 0: the result is nil (zero subslices)
-// n < 0: all subslices
+//
+// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+// n == 0: the result is nil (zero subslices)
+// n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
return genSplit(s, sep, len(sep), n)
}
@@ -1139,7 +1144,7 @@ func ReplaceAll(s, old, new []byte) []byte {
}
// EqualFold reports whether s and t, interpreted as UTF-8 strings,
-// are equal under Unicode case-folding, which is a more general
+// are equal under simple Unicode case-folding, which is a more general
// form of case-insensitivity.
func EqualFold(s, t []byte) bool {
for len(s) != 0 && len(t) != 0 {
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index 2e6ab31540..b702efb239 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -8,6 +8,7 @@ import (
. "bytes"
"fmt"
"internal/testenv"
+ "math"
"math/rand"
"reflect"
"strings"
@@ -723,6 +724,7 @@ var splittests = []SplitTest{
{"1 2", " ", 3, []string{"1", "2"}},
{"123", "", 2, []string{"1", "23"}},
{"123", "", 17, []string{"1", "2", "3"}},
+ {"bT", "T", math.MaxInt / 4, []string{"b", ""}},
}
func TestSplit(t *testing.T) {
diff --git a/src/cmd/addr2line/main.go b/src/cmd/addr2line/main.go
index 018802940b..6e005a8fac 100644
--- a/src/cmd/addr2line/main.go
+++ b/src/cmd/addr2line/main.go
@@ -6,6 +6,7 @@
// just enough to support pprof.
//
// Usage:
+//
// go tool addr2line binary
//
// Addr2line reads hexadecimal addresses, one per line and with optional 0x prefix,
diff --git a/src/cmd/asm/doc.go b/src/cmd/asm/doc.go
index 4a0c785aad..098f063909 100644
--- a/src/cmd/asm/doc.go
+++ b/src/cmd/asm/doc.go
@@ -3,11 +3,11 @@
// license that can be found in the LICENSE file.
/*
-Asm, typically invoked as ``go tool asm'', assembles the source file into an object
+Asm, typically invoked as “go tool asm”, assembles the source file into an object
file named for the basename of the argument source file with a .o suffix. The
object file can then be combined with other objects into a package archive.
-Command Line
+# Command Line
Usage:
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index 59aedbf0cc..acd03e1399 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -162,7 +162,7 @@ func (p *Parser) nextToken() lex.ScanToken {
// line consumes a single assembly line from p.lex of the form
//
-// {label:} WORD[.cond] [ arg {, arg} ] (';' | '\n')
+// {label:} WORD[.cond] [ arg {, arg} ] (';' | '\n')
//
// It adds any labels to p.pendingLabels and returns the word, cond,
// operand list, and true. If there is an error or EOF, it returns
@@ -891,7 +891,7 @@ func (p *Parser) symRefAttrs(name string, issueError bool) (bool, obj.ABI) {
// constrained form of the operand syntax that's always SB-based,
// non-static, and has at most a simple integer offset:
//
-// [$|*]sym[][+Int](SB)
+// [$|*]sym[][+Int](SB)
func (p *Parser) funcAddress() (string, obj.ABI, bool) {
switch p.peek() {
case '$', '*':
@@ -1041,9 +1041,13 @@ func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) {
//
// For 386/AMD64 register list specifies 4VNNIW-style multi-source operand.
// For range of 4 elements, Intel manual uses "+3" notation, for example:
+//
// VP4DPWSSDS zmm1{k1}{z}, zmm2+3, m128
+//
// Given asm line:
+//
// VP4DPWSSDS Z5, [Z10-Z13], (AX)
+//
// zmm2 is Z10, and Z13 is the only valid value for it (Z10+3).
// Only simple ranges are accepted, like [Z0-Z3].
//
diff --git a/src/cmd/asm/internal/lex/tokenizer.go b/src/cmd/asm/internal/lex/tokenizer.go
index 861a2d421d..4db88e20c3 100644
--- a/src/cmd/asm/internal/lex/tokenizer.go
+++ b/src/cmd/asm/internal/lex/tokenizer.go
@@ -109,7 +109,7 @@ func (t *Tokenizer) Next() ScanToken {
}
text := s.TokenText()
t.line += strings.Count(text, "\n")
- // TODO: Use constraint.IsGoBuild once it exists.
+ // TODO: Use constraint.IsGoBuild once #44505 fixed.
if strings.HasPrefix(text, "//go:build") {
t.tok = BuildComment
break
diff --git a/src/cmd/buildid/doc.go b/src/cmd/buildid/doc.go
index d1ec155c97..a554d798c0 100644
--- a/src/cmd/buildid/doc.go
+++ b/src/cmd/buildid/doc.go
@@ -6,6 +6,7 @@
Buildid displays or updates the build ID stored in a Go package or binary.
Usage:
+
go tool buildid [-w] file
By default, buildid prints the build ID found in the named file.
diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go
index a6787f6405..4c62c5d70e 100644
--- a/src/cmd/cgo/doc.go
+++ b/src/cmd/cgo/doc.go
@@ -3,10 +3,9 @@
// license that can be found in the LICENSE file.
/*
-
Cgo enables the creation of Go packages that call C code.
-Using cgo with the go command
+# Using cgo with the go command
To use cgo write normal Go code that imports a pseudo-package "C".
The Go code can then refer to types such as C.size_t, variables such
@@ -91,11 +90,11 @@ file. This allows pre-compiled static libraries to be included in the package
directory and linked properly.
For example if package foo is in the directory /go/src/foo:
- // #cgo LDFLAGS: -L${SRCDIR}/libs -lfoo
+ // #cgo LDFLAGS: -L${SRCDIR}/libs -lfoo
Will be expanded to:
- // #cgo LDFLAGS: -L/go/src/foo/libs -lfoo
+ // #cgo LDFLAGS: -L/go/src/foo/libs -lfoo
When the Go tool sees that one or more Go files use the special import
"C", it will look for other non-Go files in the directory and compile
@@ -139,7 +138,7 @@ or you can set the CC environment variable any time you run the go tool.
The CXX_FOR_TARGET, CXX_FOR_${GOOS}_${GOARCH}, and CXX
environment variables work in a similar way for C++ code.
-Go references to C
+# Go references to C
Within the Go file, C's struct field names that are keywords in Go
can be accessed by prefixing them with an underscore: if x points at a C
@@ -291,7 +290,7 @@ the helper function crashes the program, like when Go itself runs out
of memory. Because C.malloc cannot fail, it has no two-result form
that returns errno.
-C references to Go
+# C references to Go
Go functions can be exported for use by C code in the following way:
@@ -327,7 +326,7 @@ definitions and declarations, then the two output files will produce
duplicate symbols and the linker will fail. To avoid this, definitions
must be placed in preambles in other files, or in C source files.
-Passing pointers
+# Passing pointers
Go is a garbage collected language, and the garbage collector needs to
know the location of every pointer to Go memory. Because of this,
@@ -398,7 +397,7 @@ passing uninitialized C memory to Go code if the Go code is going to
store pointer values in it. Zero out the memory in C before passing it
to Go.
-Special cases
+# Special cases
A few special C types which would normally be represented by a pointer
type in Go are instead represented by a uintptr. Those include:
@@ -449,9 +448,10 @@ to auto-update code from Go 1.14 and earlier:
go tool fix -r eglconf
-Using cgo directly
+# Using cgo directly
Usage:
+
go tool cgo [cgo options] [-- compiler options] gofiles...
Cgo transforms the specified input Go source files into several output
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index 9877182fc4..a52163fd65 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -114,11 +114,11 @@ func (p *Package) addToFlag(flag string, args []string) {
//
// For example, the following string:
//
-// `a b:"c d" 'e''f' "g\""`
+// `a b:"c d" 'e''f' "g\""`
//
// Would be parsed as:
//
-// []string{"a", "b:c d", "ef", `g"`}
+// []string{"a", "b:c d", "ef", `g"`}
func splitQuoted(s string) (r []string, err error) {
var args []string
arg := make([]rune, len(s))
@@ -1137,13 +1137,19 @@ func (p *Package) mangle(f *File, arg *ast.Expr, addPosition bool) (ast.Expr, bo
// checkIndex checks whether arg has the form &a[i], possibly inside
// type conversions. If so, then in the general case it writes
-// _cgoIndexNN := a
-// _cgoNN := &cgoIndexNN[i] // with type conversions, if any
+//
+// _cgoIndexNN := a
+// _cgoNN := &cgoIndexNN[i] // with type conversions, if any
+//
// to sb, and writes
-// _cgoCheckPointer(_cgoNN, _cgoIndexNN)
+//
+// _cgoCheckPointer(_cgoNN, _cgoIndexNN)
+//
// to sbCheck, and returns true. If a is a simple variable or field reference,
// it writes
-// _cgoIndexNN := &a
+//
+// _cgoIndexNN := &a
+//
// and dereferences the uses of _cgoIndexNN. Taking the address avoids
// making a copy of an array.
//
@@ -1191,10 +1197,14 @@ func (p *Package) checkIndex(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) boo
// checkAddr checks whether arg has the form &x, possibly inside type
// conversions. If so, it writes
-// _cgoBaseNN := &x
-// _cgoNN := _cgoBaseNN // with type conversions, if any
+//
+// _cgoBaseNN := &x
+// _cgoNN := _cgoBaseNN // with type conversions, if any
+//
// to sb, and writes
-// _cgoCheckPointer(_cgoBaseNN, true)
+//
+// _cgoCheckPointer(_cgoBaseNN, true)
+//
// to sbCheck, and returns true. This tells _cgoCheckPointer to check
// just the contents of the pointer being passed, not any other part
// of the memory allocation. This is run after checkIndex, which looks
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index 8ead173e64..adbb761e38 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -1399,6 +1399,19 @@ func (p *Package) cgoType(e ast.Expr) *Type {
case *ast.ChanType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoChan")}
case *ast.Ident:
+ goTypesFixup := func(r *Type) *Type {
+ if r.Size == 0 { // int or uint
+ rr := new(Type)
+ *rr = *r
+ rr.Size = p.IntSize
+ rr.Align = p.IntSize
+ r = rr
+ }
+ if r.Align > p.PtrSize {
+ r.Align = p.PtrSize
+ }
+ return r
+ }
// Look up the type in the top level declarations.
// TODO: Handle types defined within a function.
for _, d := range p.Decl {
@@ -1417,6 +1430,17 @@ func (p *Package) cgoType(e ast.Expr) *Type {
}
}
if def := typedef[t.Name]; def != nil {
+ if defgo, ok := def.Go.(*ast.Ident); ok {
+ switch defgo.Name {
+ case "complex64", "complex128":
+ // MSVC does not support the _Complex keyword
+ // nor the complex macro.
+ // Use GoComplex64 and GoComplex128 instead,
+ // which are typedef-ed to a compatible type.
+ // See go.dev/issues/36233.
+ return goTypesFixup(goTypes[defgo.Name])
+ }
+ }
return def
}
if t.Name == "uintptr" {
@@ -1430,17 +1454,7 @@ func (p *Package) cgoType(e ast.Expr) *Type {
return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")}
}
if r, ok := goTypes[t.Name]; ok {
- if r.Size == 0 { // int or uint
- rr := new(Type)
- *rr = *r
- rr.Size = p.IntSize
- rr.Align = p.IntSize
- r = rr
- }
- if r.Align > p.PtrSize {
- r.Align = p.PtrSize
- }
- return r
+ return goTypesFixup(r)
}
error_(e.Pos(), "unrecognized Go type %s", t.Name)
return &Type{Size: 4, Align: 4, C: c("int")}
@@ -1895,8 +1909,14 @@ typedef GoUintGOINTBITS GoUint;
typedef size_t GoUintptr;
typedef float GoFloat32;
typedef double GoFloat64;
+#ifdef _MSC_VER
+#include <complex.h>
+typedef _Fcomplex GoComplex64;
+typedef _Dcomplex GoComplex128;
+#else
typedef float _Complex GoComplex64;
typedef double _Complex GoComplex128;
+#endif
/*
static assertion to make sure the file is being used on architecture
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 07ece87c41..aa5063f741 100644
--- a/src/cmd/compile/internal/abi/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -258,7 +258,7 @@ type RegAmounts struct {
// by the ABI rules for parameter passing and result returning.
type ABIConfig struct {
// Do we need anything more than this?
- offsetForLocals int64 // e.g., obj.(*Link).FixedFrameSize() -- extra linkage information on some architectures.
+ offsetForLocals int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures.
regAmounts RegAmounts
regsForTypeCache map[*types.Type]int
}
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index d34fdc611b..c9667bd04a 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -111,7 +111,9 @@ func moveByType(t *types.Type) obj.As {
}
// opregreg emits instructions for
-// dest := dest(To) op src(From)
+//
+// dest := dest(To) op src(From)
+//
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
@@ -280,8 +282,15 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
p.SetFrom3Reg(v.Args[1].Reg())
+ case ssa.OpAMD64SARXL, ssa.OpAMD64SARXQ,
+ ssa.OpAMD64SHLXL, ssa.OpAMD64SHLXQ,
+ ssa.OpAMD64SHRXL, ssa.OpAMD64SHRXQ:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ p.SetFrom3Reg(v.Args[0].Reg())
+
case ssa.OpAMD64SHLXLload, ssa.OpAMD64SHLXQload,
- ssa.OpAMD64SHRXLload, ssa.OpAMD64SHRXQload:
+ ssa.OpAMD64SHRXLload, ssa.OpAMD64SHRXQload,
+ ssa.OpAMD64SARXLload, ssa.OpAMD64SARXQload:
p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
ssagen.AddAux(&m, v)
@@ -289,8 +298,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpAMD64SHLXLloadidx1, ssa.OpAMD64SHLXLloadidx4, ssa.OpAMD64SHLXLloadidx8,
ssa.OpAMD64SHRXLloadidx1, ssa.OpAMD64SHRXLloadidx4, ssa.OpAMD64SHRXLloadidx8,
+ ssa.OpAMD64SARXLloadidx1, ssa.OpAMD64SARXLloadidx4, ssa.OpAMD64SARXLloadidx8,
ssa.OpAMD64SHLXQloadidx1, ssa.OpAMD64SHLXQloadidx8,
- ssa.OpAMD64SHRXQloadidx1, ssa.OpAMD64SHRXQloadidx8:
+ ssa.OpAMD64SHRXQloadidx1, ssa.OpAMD64SHRXQloadidx8,
+ ssa.OpAMD64SARXQloadidx1, ssa.OpAMD64SARXQloadidx8:
p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[2].Reg())
m := obj.Addr{Type: obj.TYPE_MEM}
memIdx(&m, v)
@@ -786,7 +797,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
- ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
+ ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2,
+ ssa.OpAMD64MOVBELloadidx1, ssa.OpAMD64MOVBELloadidx4, ssa.OpAMD64MOVBELloadidx8, ssa.OpAMD64MOVBEQloadidx1, ssa.OpAMD64MOVBEQloadidx8:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
ssagen.AddAux(&p.From, v)
@@ -808,7 +820,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpAMD64SUBLmodifyidx1, ssa.OpAMD64SUBLmodifyidx4, ssa.OpAMD64SUBLmodifyidx8, ssa.OpAMD64SUBQmodifyidx1, ssa.OpAMD64SUBQmodifyidx8,
ssa.OpAMD64ANDLmodifyidx1, ssa.OpAMD64ANDLmodifyidx4, ssa.OpAMD64ANDLmodifyidx8, ssa.OpAMD64ANDQmodifyidx1, ssa.OpAMD64ANDQmodifyidx8,
ssa.OpAMD64ORLmodifyidx1, ssa.OpAMD64ORLmodifyidx4, ssa.OpAMD64ORLmodifyidx8, ssa.OpAMD64ORQmodifyidx1, ssa.OpAMD64ORQmodifyidx8,
- ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8:
+ ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8,
+ ssa.OpAMD64MOVBEWstoreidx1, ssa.OpAMD64MOVBEWstoreidx2, ssa.OpAMD64MOVBELstoreidx1, ssa.OpAMD64MOVBELstoreidx4, ssa.OpAMD64MOVBELstoreidx8, ssa.OpAMD64MOVBEQstoreidx1, ssa.OpAMD64MOVBEQstoreidx8:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
@@ -1087,7 +1100,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
}
p := s.Prog(mov)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize // 0 on amd64, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1387,6 +1400,16 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
}
}
+ case ssa.BlockAMD64JUMPTABLE:
+ // JMP *(TABLE)(INDEX*8)
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = b.Controls[1].Reg()
+ p.To.Index = b.Controls[0].Reg()
+ p.To.Scale = 8
+ // Save jump tables for later resolution of the target blocks.
+ s.JumpTables = append(s.JumpTables, b)
+
default:
b.Fatalf("branch not implemented: %s", b.LongString())
}
diff --git a/src/cmd/compile/internal/amd64/versions_test.go b/src/cmd/compile/internal/amd64/versions_test.go
index e28aefbd3c..1ef06f7e58 100644
--- a/src/cmd/compile/internal/amd64/versions_test.go
+++ b/src/cmd/compile/internal/amd64/versions_test.go
@@ -241,6 +241,7 @@ var featureToOpcodes = map[string][]string{
// native objdump doesn't include [QL] on linux.
"popcnt": {"popcntq", "popcntl", "popcnt"},
"bmi1": {"andnq", "andnl", "andn", "blsiq", "blsil", "blsi", "blsmskq", "blsmskl", "blsmsk", "blsrq", "blsrl", "blsr", "tzcntq", "tzcntl", "tzcnt"},
+ "bmi2": {"sarxq", "sarxl", "sarx", "shlxq", "shlxl", "shlx", "shrxq", "shrxl", "shrx"},
"sse41": {"roundsd"},
"fma": {"vfmadd231sd"},
"movbe": {"movbeqq", "movbeq", "movbell", "movbel", "movbe"},
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 063fb65b33..a53f51bd13 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -854,7 +854,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 48eb2190b2..3b6e6f6723 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -171,7 +171,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
for _, a := range v.Block.Func.RegArgs {
// Pass the spill/unspill information along to the assembler, offset by size of
// the saved LR slot.
- addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
+ addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.Arch.FixedFrameSize)
s.FuncInfo().AddSpill(
obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
}
@@ -1128,7 +1128,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
index c37a5a6990..decd261183 100644
--- a/src/cmd/compile/internal/deadcode/deadcode.go
+++ b/src/cmd/compile/internal/deadcode/deadcode.go
@@ -6,6 +6,7 @@ package deadcode
import (
"go/constant"
+ "go/token"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -86,6 +87,85 @@ func stmts(nn *ir.Nodes) {
}
}
}
+ if n.Op() == ir.OSWITCH {
+ n := n.(*ir.SwitchStmt)
+ // Use a closure wrapper here so we can use "return" to abort the analysis.
+ func() {
+ if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
+ return // no special type-switch case yet.
+ }
+ var x constant.Value // value we're switching on
+ if n.Tag != nil {
+ if ir.ConstType(n.Tag) == constant.Unknown {
+ return
+ }
+ x = n.Tag.Val()
+ } else {
+ x = constant.MakeBool(true) // switch { ... } => switch true { ... }
+ }
+ var def *ir.CaseClause
+ for _, cas := range n.Cases {
+ if len(cas.List) == 0 { // default case
+ def = cas
+ continue
+ }
+ for _, c := range cas.List {
+ if ir.ConstType(c) == constant.Unknown {
+ return // can't statically tell if it matches or not - give up.
+ }
+ if constant.Compare(x, token.EQL, c.Val()) {
+ for _, n := range cas.Body {
+ if n.Op() == ir.OFALL {
+ return // fallthrough makes it complicated - abort.
+ }
+ }
+ // This switch entry is the one that always triggers.
+ for _, cas2 := range n.Cases {
+ for _, c2 := range cas2.List {
+ if cas2 != cas || c2 != c {
+ ir.Visit(c2, markHiddenClosureDead)
+ }
+ }
+ if cas2 != cas {
+ ir.VisitList(cas2.Body, markHiddenClosureDead)
+ }
+ }
+
+ cas.List[0] = c
+ cas.List = cas.List[:1]
+ n.Cases[0] = cas
+ n.Cases = n.Cases[:1]
+ return
+ }
+ }
+ }
+ if def != nil {
+ for _, n := range def.Body {
+ if n.Op() == ir.OFALL {
+ return // fallthrough makes it complicated - abort.
+ }
+ }
+ for _, cas := range n.Cases {
+ if cas != def {
+ ir.VisitList(cas.List, markHiddenClosureDead)
+ ir.VisitList(cas.Body, markHiddenClosureDead)
+ }
+ }
+ n.Cases[0] = def
+ n.Cases = n.Cases[:1]
+ return
+ }
+
+ // TODO: handle case bodies ending with panic/return as we do in the IF case above.
+
+ // entire switch is a nop - no case ever triggers
+ for _, cas := range n.Cases {
+ ir.VisitList(cas.List, markHiddenClosureDead)
+ ir.VisitList(cas.Body, markHiddenClosureDead)
+ }
+ n.Cases = n.Cases[:0]
+ }()
+ }
if len(n.Init()) != 0 {
stmts(n.(ir.InitNode).PtrInit())
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index e249a52e57..f84368ece3 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -339,7 +339,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
localAutoOffset := func() int64 {
offs = n.FrameOffset()
- if base.Ctxt.FixedFrameSize() == 0 {
+ if base.Ctxt.Arch.FixedFrameSize == 0 {
offs -= int64(types.PtrSize)
}
if buildcfg.FramePointerEnabled {
@@ -357,7 +357,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
if n.IsOutputParamInRegisters() {
offs = localAutoOffset()
} else {
- offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+ offs = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize
}
default:
@@ -547,7 +547,7 @@ func RecordFlags(flags ...string) {
fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
}
- // Adds flag to producer string singalling whether regabi is turned on or
+ // Adds flag to producer string signaling whether regabi is turned on or
// off.
// Once regabi is turned on across the board and the relative GOEXPERIMENT
// knobs no longer exist this code should be removed.
diff --git a/src/cmd/compile/internal/escape/desugar.go b/src/cmd/compile/internal/escape/desugar.go
index 8b3cc25cf9..6c21981aca 100644
--- a/src/cmd/compile/internal/escape/desugar.go
+++ b/src/cmd/compile/internal/escape/desugar.go
@@ -24,9 +24,9 @@ func fixRecoverCall(call *ir.CallExpr) {
pos := call.Pos()
- // FP is equal to caller's SP plus FixedFrameSize().
+ // FP is equal to caller's SP plus FixedFrameSize.
var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
- if off := base.Ctxt.FixedFrameSize(); off != 0 {
+ if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
}
// TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 4713ecddca..4408a531ec 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -210,6 +210,9 @@ func (b *batch) walkFunc(fn *ir.Func) {
switch n.Op() {
case ir.OLABEL:
n := n.(*ir.LabelStmt)
+ if n.Label.IsBlank() {
+ break
+ }
if e.labels == nil {
e.labels = make(map[*types.Sym]labelState)
}
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
index 0afb5d64ef..4e8dd904ff 100644
--- a/src/cmd/compile/internal/escape/stmt.go
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -50,6 +50,9 @@ func (e *escape) stmt(n ir.Node) {
case ir.OLABEL:
n := n.(*ir.LabelStmt)
+ if n.Label.IsBlank() {
+ break
+ }
switch e.labels[n.Label] {
case nonlooping:
if base.Flag.LowerM > 2 {
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 74e4c0a890..fe8b6e9d45 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -271,6 +271,9 @@ func addGCLocals() {
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
x.Set(obj.AttrStatic, true)
}
+ for _, jt := range fn.JumpTables {
+ objw.Global(jt.Sym, int32(len(jt.Targets)*base.Ctxt.Arch.PtrSize), obj.RODATA)
+ }
}
}
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index be01914d08..8c2ea49c8f 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -522,7 +522,8 @@ func InlineCalls(fn *ir.Func) {
// but then you may as well do it here. so this is cleaner and
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
-// n.Left = inlnode(n.Left)
+//
+// n.Left = inlnode(n.Left)
func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
if n == nil {
return n
@@ -657,7 +658,8 @@ var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCa
// inlined function body, and (List, Rlist) contain the (input, output)
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
-// n.Left = mkinlcall(n.Left, fn, isddd)
+//
+// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
if fn.Inl == nil {
if logopt.Enabled() {
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
index eaa4d5b6b1..f0b66957f1 100644
--- a/src/cmd/compile/internal/ir/const.go
+++ b/src/cmd/compile/internal/ir/const.go
@@ -26,7 +26,7 @@ func NewString(s string) Node {
}
const (
- // Maximum size in bits for big.Ints before signalling
+ // Maximum size in bits for big.Ints before signaling
// overflow and also mantissa precision for big.Floats.
ConstPrec = 512
)
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index b5c0983d6a..4f1f582fa1 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -951,11 +951,11 @@ var IsIntrinsicCall = func(*CallExpr) bool { return false }
// instead of computing both. SameSafeExpr assumes that l and r are
// used in the same statement or expression. In order for it to be
// safe to reuse l or r, they must:
-// * be the same expression
-// * not have side-effects (no function calls, no channel ops);
-// however, panics are ok
-// * not cause inappropriate aliasing; e.g. two string to []byte
-// conversions, must result in two distinct slices
+// - be the same expression
+// - not have side-effects (no function calls, no channel ops);
+// however, panics are ok
+// - not cause inappropriate aliasing; e.g. two string to []byte
+// conversions, must result in two distinct slices
//
// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
// as an lvalue (map assignment) and an rvalue (map access). This is
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 5e5868abb2..9ccb8e3c30 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -310,6 +310,7 @@ const (
ORESULT // result of a function call; Xoffset is stack offset
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
OLINKSYMOFFSET // offset within a name
+ OJUMPTABLE // A jump table structure for implementing dense expression switches
// opcodes for generics
ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
@@ -551,7 +552,8 @@ func SetPos(n Node) src.XPos {
}
// The result of InitExpr MUST be assigned back to n, e.g.
-// n.X = InitExpr(init, n.X)
+//
+// n.X = InitExpr(init, n.X)
func InitExpr(init []Node, expr Node) Node {
if len(init) == 0 {
return expr
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index 22ff885d68..8d6fc8c607 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -712,6 +712,28 @@ func (n *InstExpr) editChildren(edit func(Node) Node) {
editNodes(n.Targs, edit)
}
+func (n *JumpTableStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *JumpTableStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *JumpTableStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Idx != nil && do(n.Idx) {
+ return true
+ }
+ return false
+}
+func (n *JumpTableStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Idx != nil {
+ n.Idx = edit(n.Idx).(Node)
+ }
+}
+
func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *KeyExpr) copy() Node {
c := *n
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index 14eb84083a..8927f18cea 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -154,19 +154,20 @@ func _() {
_ = x[ORESULT-143]
_ = x[OINLMARK-144]
_ = x[OLINKSYMOFFSET-145]
- _ = x[ODYNAMICDOTTYPE-146]
- _ = x[ODYNAMICDOTTYPE2-147]
- _ = x[ODYNAMICTYPE-148]
- _ = x[OTAILCALL-149]
- _ = x[OGETG-150]
- _ = x[OGETCALLERPC-151]
- _ = x[OGETCALLERSP-152]
- _ = x[OEND-153]
+ _ = x[OJUMPTABLE-146]
+ _ = x[ODYNAMICDOTTYPE-147]
+ _ = x[ODYNAMICDOTTYPE2-148]
+ _ = x[ODYNAMICTYPE-149]
+ _ = x[OTAILCALL-150]
+ _ = x[OGETG-151]
+ _ = x[OGETCALLERPC-152]
+ _ = x[OGETCALLERSP-153]
+ _ = x[OEND-154]
}
-const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2REALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTFUNCINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
+const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2REALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTFUNCINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETJUMPTABLEDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 132, 134, 137, 147, 154, 161, 168, 172, 176, 184, 192, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 282, 289, 293, 296, 303, 311, 318, 324, 327, 333, 340, 348, 352, 359, 367, 369, 371, 373, 375, 377, 379, 384, 389, 397, 400, 409, 412, 416, 424, 431, 440, 453, 456, 459, 462, 465, 468, 471, 477, 480, 483, 489, 493, 496, 500, 505, 510, 516, 521, 525, 530, 538, 546, 552, 561, 572, 579, 588, 592, 599, 607, 611, 615, 622, 629, 637, 643, 652, 663, 671, 680, 685, 690, 694, 702, 707, 711, 714, 722, 726, 728, 733, 735, 740, 746, 752, 758, 764, 772, 777, 784, 789, 793, 798, 802, 807, 815, 821, 828, 835, 841, 848, 861, 875, 890, 901, 909, 913, 924, 935, 938}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 132, 134, 137, 147, 154, 161, 168, 172, 176, 184, 192, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 282, 289, 293, 296, 303, 311, 318, 324, 327, 333, 340, 348, 352, 359, 367, 369, 371, 373, 375, 377, 379, 384, 389, 397, 400, 409, 412, 416, 424, 431, 440, 453, 456, 459, 462, 465, 468, 471, 477, 480, 483, 489, 493, 496, 500, 505, 510, 516, 521, 525, 530, 538, 546, 552, 561, 572, 579, 588, 592, 599, 607, 611, 615, 622, 629, 637, 643, 652, 663, 671, 680, 685, 690, 694, 702, 707, 711, 714, 722, 726, 728, 733, 735, 740, 746, 752, 758, 764, 772, 777, 784, 789, 793, 798, 802, 807, 815, 821, 828, 835, 841, 848, 861, 870, 884, 899, 910, 918, 922, 933, 944, 947}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index 80bd205436..0e76f17440 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
+ "go/constant"
)
// A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
@@ -262,6 +263,37 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
return n
}
+// A JumpTableStmt is used to implement switches. Its semantics are:
+// tmp := jt.Idx
+// if tmp == Cases[0] goto Targets[0]
+// if tmp == Cases[1] goto Targets[1]
+// ...
+// if tmp == Cases[n] goto Targets[n]
+// Note that a JumpTableStmt is more like a multiway-goto than
+// a multiway-if. In particular, the case bodies are just
+// labels to jump to, not full Nodes lists.
+type JumpTableStmt struct {
+ miniStmt
+
+ // Value used to index the jump table.
+ // We support only integer types that
+ // are at most the size of a uintptr.
+ Idx Node
+
+ // If Idx is equal to Cases[i], jump to Targets[i].
+ // Cases entries must be distinct and in increasing order.
+ // The length of Cases and Targets must be equal.
+ Cases []constant.Value
+ Targets []*types.Sym
+}
+
+func NewJumpTableStmt(pos src.XPos, idx Node) *JumpTableStmt {
+ n := &JumpTableStmt{Idx: idx}
+ n.pos = pos
+ n.op = OJUMPTABLE
+ return n
+}
+
// An InlineMarkStmt is a marker placed just before an inlined body.
type InlineMarkStmt struct {
miniStmt
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index 3202e506c8..bd0a6fa1a3 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -244,8 +244,10 @@ func (lv *liveness) initcache() {
// liveness effects on a variable.
//
// The possible flags are:
+//
// uevar - used by the instruction
// varkill - killed by the instruction (set)
+//
// A kill happens after the use (for an instruction that updates a value, for example).
type liveEffect int
@@ -1460,14 +1462,14 @@ func (lv *liveness) emitStackObjects() *obj.LSym {
// isfat reports whether a variable of type t needs multiple assignments to initialize.
// For example:
//
-// type T struct { x, y int }
-// x := T{x: 0, y: 1}
+// type T struct { x, y int }
+// x := T{x: 0, y: 1}
//
// Then we need:
//
-// var t T
-// t.x = 0
-// t.y = 1
+// var t T
+// t.x = 0
+// t.y = 1
//
// to fully initialize t.
func isfat(t *types.Type) bool {
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
index 1a5125207d..a18440e7b3 100644
--- a/src/cmd/compile/internal/mips/ggen.go
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -20,7 +20,7 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i)
}
} else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
@@ -30,7 +30,7 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index 6326f966bf..0411756c8d 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -792,7 +792,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index 6e12c6cb94..f3e372c3bc 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -762,7 +762,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
index 566abda963..e37e4cd661 100644
--- a/src/cmd/compile/internal/noder/expr.go
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -6,6 +6,7 @@ package noder
import (
"fmt"
+ "go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -62,6 +63,14 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
case types2.UntypedNil:
// ok; can appear in type switch case clauses
// TODO(mdempsky): Handle as part of type switches instead?
+ case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex:
+ // Untyped rhs of non-constant shift, e.g. x << 1.0.
+ // If we have a constant value, it must be an int >= 0.
+ if tv.Value != nil {
+ s := constant.ToInt(tv.Value)
+ assert(s.Kind() == constant.Int && constant.Sign(s) >= 0)
+ }
+ typ = types2.Typ[types2.Uint]
case types2.UntypedBool:
typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
case types2.UntypedString:
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index 77ca642183..cc5610acda 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -33,30 +33,36 @@ func LoadPackage(filenames []string) {
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
noders := make([]*noder, len(filenames))
- for i, filename := range filenames {
+ for i := range noders {
p := noder{
err: make(chan syntax.Error),
}
noders[i] = &p
-
- filename := filename
- go func() {
- sem <- struct{}{}
- defer func() { <-sem }()
- defer close(p.err)
- fbase := syntax.NewFileBase(filename)
-
- f, err := os.Open(filename)
- if err != nil {
- p.error(syntax.Error{Msg: err.Error()})
- return
- }
- defer f.Close()
-
- p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, mode) // errors are tracked via p.error
- }()
}
+ // Move the entire syntax processing logic into a separate goroutine to avoid blocking on the "sem".
+ go func() {
+ for i, filename := range filenames {
+ filename := filename
+ p := noders[i]
+ sem <- struct{}{}
+ go func() {
+ defer func() { <-sem }()
+ defer close(p.err)
+ fbase := syntax.NewFileBase(filename)
+
+ f, err := os.Open(filename)
+ if err != nil {
+ p.error(syntax.Error{Msg: err.Error()})
+ return
+ }
+ defer f.Close()
+
+ p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, mode) // errors are tracked via p.error
+ }()
+ }
+ }()
+
var lines uint
for _, p := range noders {
for e := range p.err {
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index eeac8d8de7..41435a7afe 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -741,6 +741,7 @@ func (g *genInst) genericSubst(newsym *types.Sym, nameNode *ir.Name, tparams []*
// Pos of the instantiated function is same as the generic function
newf := ir.NewFunc(gf.Pos())
newf.Pragma = gf.Pragma // copy over pragmas from generic function to stenciled implementation.
+ newf.Endlineno = gf.Endlineno
newf.Nname = ir.NewNameAt(gf.Pos(), newsym)
newf.Nname.Func = newf
newf.Nname.Defn = newf
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index 2c1f2362ad..e7a4001cec 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -33,38 +33,38 @@ var localPkgReader *pkgReader
//
// The pipeline contains 2 steps:
//
-// 1) Generate package export data "stub".
+// 1. Generate package export data "stub".
//
-// 2) Generate package IR from package export data.
+// 2. Generate package IR from package export data.
//
// The package data "stub" at step (1) contains everything from the local package,
// but nothing that have been imported. When we're actually writing out export data
// to the output files (see writeNewExport function), we run the "linker", which does
// a few things:
//
-// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
+// - Updates compiler extensions data (e.g., inlining cost, escape analysis results).
//
-// + Handles re-exporting any transitive dependencies.
+// - Handles re-exporting any transitive dependencies.
//
-// + Prunes out any unnecessary details (e.g., non-inlineable functions, because any
-// downstream importers only care about inlinable functions).
+// - Prunes out any unnecessary details (e.g., non-inlineable functions, because any
+// downstream importers only care about inlinable functions).
//
// The source files are typechecked twice, once before writing export data
// using types2 checker, once after read export data using gc/typecheck.
// This duplication of work will go away once we always use types2 checker,
// we can remove the gc/typecheck pass. The reason it is still here:
//
-// + It reduces engineering costs in maintaining a fork of typecheck
-// (e.g., no need to backport fixes like CL 327651).
+// - It reduces engineering costs in maintaining a fork of typecheck
+// (e.g., no need to backport fixes like CL 327651).
//
-// + It makes it easier to pass toolstash -cmp.
+// - It makes it easier to pass toolstash -cmp.
//
-// + Historically, we would always re-run the typechecker after import, even though
-// we know the imported data is valid. It's not ideal, but also not causing any
-// problem either.
+// - Historically, we would always re-run the typechecker after import, even though
+// we know the imported data is valid. It's not ideal, but also not causing any
+// problem either.
//
-// + There's still transformation that being done during gc/typecheck, like rewriting
-// multi-valued function call, or transform ir.OINDEX -> ir.OINDEXMAP.
+// - There's still transformation that being done during gc/typecheck, like rewriting
+// multi-valued function call, or transform ir.OINDEX -> ir.OINDEXMAP.
//
// Using syntax+types2 tree, which already has a complete representation of generics,
// the unified IR has the full typed AST for doing introspection during step (1).
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
index 40f1408260..32e95bedc2 100644
--- a/src/cmd/compile/internal/pkginit/init.go
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -65,9 +65,9 @@ func MakeInit() {
// Task makes and returns an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
-// 1) Initialize all of the packages the current package depends on.
-// 2) Initialize all the variables that have initializers.
-// 3) Run any init functions.
+// 1. Initialize all of the packages the current package depends on.
+// 2. Initialize all the variables that have initializers.
+// 3. Run any init functions.
func Task() *ir.Name {
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index 7877be3336..4c935cfc71 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -19,17 +19,17 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i)
}
} else if cnt <= int64(128*types.PtrSize) {
- p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
- p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index da74cacd95..8689bd8b27 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -476,7 +476,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -509,7 +509,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
for _, a := range v.Block.Func.RegArgs {
// Pass the spill/unspill information along to the assembler, offset by size of
// the saved LR slot.
- addr := ssagen.SpillSlotAddr(a, ppc64.REGSP, base.Ctxt.FixedFrameSize())
+ addr := ssagen.SpillSlotAddr(a, ppc64.REGSP, base.Ctxt.Arch.FixedFrameSize)
s.FuncInfo().AddSpill(
obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
}
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index d000618bd6..9fe90da0fe 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -412,22 +412,25 @@ func geneq(t *types.Type) *obj.LSym {
//
// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
// } else {
- // return
+ // goto neq
// }
//
// And so on.
//
// Otherwise it generates:
//
- // for i := 0; i < nelem; i++ {
- // if eq(p[i], q[i]) {
+ // iterateTo := nelem/unroll*unroll
+ // for i := 0; i < iterateTo; i += unroll {
+ // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
// } else {
// goto neq
// }
// }
+ // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
+ // } else {
+ // goto neq
+ // }
//
- // TODO(josharian): consider doing some loop unrolling
- // for larger nelem as well, processing a few elements at a time in a loop.
checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i ir.Node) ir.Node {
@@ -442,38 +445,62 @@ func geneq(t *types.Type) *obj.LSym {
return eq(pi, qi)
}
- if nelem <= unroll {
- if last {
- // Do last comparison in a different manner.
- nelem--
- }
- // Generate a series of checks.
- for i := int64(0); i < nelem; i++ {
- // if check {} else { goto neq }
- nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(i)), nil, nil)
- nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
- fn.Body.Append(nif)
- }
- if last {
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(nelem))))
- }
- } else {
- // Generate a for loop.
- // for i := 0; i < nelem; i++
+ iterations := nelem / unroll
+ iterateTo := iterations * unroll
+ // If a loop is iterated only once, there shouldn't be any loop at all.
+ if iterations == 1 {
+ iterateTo = 0
+ }
+
+ if iterateTo > 0 {
+ // Generate an unrolled for loop.
+ // for i := 0; i < nelem/unroll*unroll; i += unroll
i := typecheck.Temp(types.Types[types.TINT])
init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
- cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(nelem))
- post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
- loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(iterateTo))
+ loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil)
loop.PtrInit().Append(init)
- // if eq(pi, qi) {} else { goto neq }
- nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
- nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
- loop.Body.Append(nif)
- fn.Body.Append(loop)
- if last {
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
+
+ // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
+ // } else {
+ // goto neq
+ // }
+ for j := int64(0); j < unroll; j++ {
+ // if check {} else { goto neq }
+ nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ loop.Body.Append(nif)
+ post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+ loop.Body.Append(post)
}
+
+ fn.Body.Append(loop)
+
+ if nelem == iterateTo {
+ if last {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
+ }
+ return
+ }
+ }
+
+ // Generate remaining checks, if nelem is not a multiple of unroll.
+ if last {
+ // Do last comparison in a different manner.
+ nelem--
+ }
+ // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
+ // } else {
+ // goto neq
+ // }
+ for j := iterateTo; j < nelem; j++ {
+ // if check {} else { goto neq }
+ nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(j)), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(nif)
+ }
+ if last {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(nelem))))
}
}
@@ -481,7 +508,6 @@ func geneq(t *types.Type) *obj.LSym {
case types.TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
- // TODO: when the array size is small, unroll the length match checks.
checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
eqlen, _ := EqString(pi, qi)
@@ -655,7 +681,8 @@ func anyCall(fn *ir.Func) bool {
}
// eqfield returns the node
-// p.field == q.field
+//
+// p.field == q.field
func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
@@ -664,9 +691,13 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
}
// EqString returns the nodes
-// len(s) == len(t)
+//
+// len(s) == len(t)
+//
// and
-// memequal(s.ptr, t.ptr, len(s))
+//
+// memequal(s.ptr, t.ptr, len(s))
+//
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
@@ -688,9 +719,13 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
}
// EqInterface returns the nodes
-// s.tab == t.tab (or s.typ == t.typ, as appropriate)
+//
+// s.tab == t.tab (or s.typ == t.typ, as appropriate)
+//
// and
-// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+//
+// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+//
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
@@ -724,7 +759,8 @@ func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
}
// eqmem returns the node
-// memequal(&p.field, &q.field [, size])
+//
+// memequal(&p.field, &q.field [, size])
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
diff --git a/src/cmd/compile/internal/reflectdata/alg_test.go b/src/cmd/compile/internal/reflectdata/alg_test.go
new file mode 100644
index 0000000000..1e57b913fd
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/alg_test.go
@@ -0,0 +1,76 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata_test
+
+import "testing"
+
+func BenchmarkEqArrayOfStrings5(b *testing.B) {
+ var a [5]string
+ var c [5]string
+
+ for i := 0; i < 5; i++ {
+ a[i] = "aaaa"
+ c[i] = "cccc"
+ }
+
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfStrings64(b *testing.B) {
+ var a [64]string
+ var c [64]string
+
+ for i := 0; i < 64; i++ {
+ a[i] = "aaaa"
+ c[i] = "cccc"
+ }
+
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfStrings1024(b *testing.B) {
+ var a [1024]string
+ var c [1024]string
+
+ for i := 0; i < 1024; i++ {
+ a[i] = "aaaa"
+ c[i] = "cccc"
+ }
+
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfFloats5(b *testing.B) {
+ var a [5]float32
+ var c [5]float32
+
+ for i := 0; i < b.N; i++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfFloats64(b *testing.B) {
+ var a [64]float32
+ var c [64]float32
+
+ for i := 0; i < b.N; i++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfFloats1024(b *testing.B) {
+ var a [1024]float32
+ var c [1024]float32
+
+ for i := 0; i < b.N; i++ {
+ _ = a == c
+ }
+}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 1137101650..affc6799ab 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -667,10 +667,10 @@ var kinds = []int{
// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
-// - cmd/compile/internal/reflectdata/reflect.go
-// - cmd/link/internal/ld/decodesym.go
-// - reflect/type.go
-// - runtime/type.go
+// - cmd/compile/internal/reflectdata/reflect.go
+// - cmd/link/internal/ld/decodesym.go
+// - reflect/type.go
+// - runtime/type.go
const (
tflagUncommon = 1 << 0
tflagExtraStar = 1 << 1
@@ -1355,21 +1355,21 @@ func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
// type itab struct {
// inter *interfacetype
// _type *_type
- // hash uint32
+ // hash uint32 // copy of _type.hash. Used for type switches.
// _ [4]byte
- // fun [1]uintptr // variable sized
+ // fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
// }
o := objw.SymPtr(lsym, 0, writeType(iface), 0)
o = objw.SymPtr(lsym, o, writeType(typ), 0)
o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
o += 4 // skip unused field
+ if !completeItab {
+ // If typ doesn't implement iface, make method entries be zero.
+ o = objw.Uintptr(lsym, o, 0)
+ entries = entries[:0]
+ }
for _, fn := range entries {
- if !completeItab {
- // If typ doesn't implement iface, make method entries be zero.
- o = objw.Uintptr(lsym, o, 0)
- } else {
- o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
- }
+ o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
}
// Nothing writes static itabs, so they are read only.
objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
@@ -1821,13 +1821,17 @@ func NeedEmit(typ *types.Type) bool {
// Also wraps methods on instantiated generic types for use in itab entries.
// For an instantiated generic type G[int], we generate wrappers like:
// G[int] pointer shaped:
+//
// func (x G[int]) f(arg) {
// .inst.G[int].f(dictionary, x, arg)
-// }
+// }
+//
// G[int] not pointer shaped:
+//
// func (x *G[int]) f(arg) {
// .inst.G[int].f(dictionary, *x, arg)
-// }
+// }
+//
// These wrappers are always fully stenciled.
func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
orig := rcvr
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
index 0f37f65fcf..44488e4327 100644
--- a/src/cmd/compile/internal/riscv64/ggen.go
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -19,7 +19,7 @@ func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
// Adjust the frame to account for LR.
- off += base.Ctxt.FixedFrameSize()
+ off += base.Ctxt.Arch.FixedFrameSize
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index b6e6dc1a03..5f74fd876c 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -237,7 +237,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
for _, a := range v.Block.Func.RegArgs {
// Pass the spill/unspill information along to the assembler, offset by size of
// the saved LR slot.
- addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.FixedFrameSize())
+ addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
s.FuncInfo().AddSpill(
obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
}
@@ -669,7 +669,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(riscv.AMOV)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
index 488a080c46..70e4031224 100644
--- a/src/cmd/compile/internal/s390x/ggen.go
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -24,7 +24,7 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
// Adjust the frame to account for LR.
- off += base.Ctxt.FixedFrameSize()
+ off += base.Ctxt.Arch.FixedFrameSize
reg := int16(s390x.REGSP)
// If the off cannot fit in a 12-bit unsigned displacement then we
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index deb6c79006..7d9b31de4c 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -132,7 +132,9 @@ func moveByType(t *types.Type) obj.As {
}
// opregreg emits instructions for
-// dest := dest(To) op src(From)
+//
+// dest := dest(To) op src(From)
+//
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
@@ -145,7 +147,9 @@ func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
}
// opregregimm emits instructions for
+//
// dest := src(From) op off
+//
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
@@ -546,7 +550,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
index 28fa86cd64..469ba0d494 100644
--- a/src/cmd/compile/internal/ssa/addressingmodes.go
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -131,10 +131,14 @@ var needSplit = map[Op]bool{
}
// For each entry k, v in this map, if we have a value x with:
-// x.Op == k[0]
-// x.Args[0].Op == k[1]
+//
+// x.Op == k[0]
+// x.Args[0].Op == k[1]
+//
// then we can set x.Op to v and set x.Args like this:
-// x.Args[0].Args + x.Args[1:]
+//
+// x.Args[0].Args + x.Args[1:]
+//
// Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
var combine = map[[2]Op]Op{
// amd64
@@ -340,11 +344,18 @@ var combine = map[[2]Op]Op{
[2]Op{OpAMD64DIVSDload, OpAMD64LEAQ1}: OpAMD64DIVSDloadidx1,
[2]Op{OpAMD64DIVSDload, OpAMD64LEAQ8}: OpAMD64DIVSDloadidx8,
+ [2]Op{OpAMD64SARXLload, OpAMD64ADDQ}: OpAMD64SARXLloadidx1,
+ [2]Op{OpAMD64SARXQload, OpAMD64ADDQ}: OpAMD64SARXQloadidx1,
[2]Op{OpAMD64SHLXLload, OpAMD64ADDQ}: OpAMD64SHLXLloadidx1,
[2]Op{OpAMD64SHLXQload, OpAMD64ADDQ}: OpAMD64SHLXQloadidx1,
[2]Op{OpAMD64SHRXLload, OpAMD64ADDQ}: OpAMD64SHRXLloadidx1,
[2]Op{OpAMD64SHRXQload, OpAMD64ADDQ}: OpAMD64SHRXQloadidx1,
+ [2]Op{OpAMD64SARXLload, OpAMD64LEAQ1}: OpAMD64SARXLloadidx1,
+ [2]Op{OpAMD64SARXLload, OpAMD64LEAQ4}: OpAMD64SARXLloadidx4,
+ [2]Op{OpAMD64SARXLload, OpAMD64LEAQ8}: OpAMD64SARXLloadidx8,
+ [2]Op{OpAMD64SARXQload, OpAMD64LEAQ1}: OpAMD64SARXQloadidx1,
+ [2]Op{OpAMD64SARXQload, OpAMD64LEAQ8}: OpAMD64SARXQloadidx8,
[2]Op{OpAMD64SHLXLload, OpAMD64LEAQ1}: OpAMD64SHLXLloadidx1,
[2]Op{OpAMD64SHLXLload, OpAMD64LEAQ4}: OpAMD64SHLXLloadidx4,
[2]Op{OpAMD64SHLXLload, OpAMD64LEAQ8}: OpAMD64SHLXLloadidx8,
@@ -356,6 +367,26 @@ var combine = map[[2]Op]Op{
[2]Op{OpAMD64SHRXQload, OpAMD64LEAQ1}: OpAMD64SHRXQloadidx1,
[2]Op{OpAMD64SHRXQload, OpAMD64LEAQ8}: OpAMD64SHRXQloadidx8,
+ // amd64/v3
+ [2]Op{OpAMD64MOVBELload, OpAMD64ADDQ}: OpAMD64MOVBELloadidx1,
+ [2]Op{OpAMD64MOVBEQload, OpAMD64ADDQ}: OpAMD64MOVBEQloadidx1,
+ [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ1}: OpAMD64MOVBELloadidx1,
+ [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ4}: OpAMD64MOVBELloadidx4,
+ [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ8}: OpAMD64MOVBELloadidx8,
+ [2]Op{OpAMD64MOVBEQload, OpAMD64LEAQ1}: OpAMD64MOVBEQloadidx1,
+ [2]Op{OpAMD64MOVBEQload, OpAMD64LEAQ8}: OpAMD64MOVBEQloadidx8,
+
+ [2]Op{OpAMD64MOVBEWstore, OpAMD64ADDQ}: OpAMD64MOVBEWstoreidx1,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64ADDQ}: OpAMD64MOVBELstoreidx1,
+ [2]Op{OpAMD64MOVBEQstore, OpAMD64ADDQ}: OpAMD64MOVBEQstoreidx1,
+ [2]Op{OpAMD64MOVBEWstore, OpAMD64LEAQ1}: OpAMD64MOVBEWstoreidx1,
+ [2]Op{OpAMD64MOVBEWstore, OpAMD64LEAQ2}: OpAMD64MOVBEWstoreidx2,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ1}: OpAMD64MOVBELstoreidx1,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ4}: OpAMD64MOVBELstoreidx4,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ8}: OpAMD64MOVBELstoreidx8,
+ [2]Op{OpAMD64MOVBEQstore, OpAMD64LEAQ1}: OpAMD64MOVBEQstoreidx1,
+ [2]Op{OpAMD64MOVBEQstore, OpAMD64LEAQ8}: OpAMD64MOVBEQstoreidx8,
+
// 386
[2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1,
[2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1,
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index 4d21ade3e3..db7df3f338 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -71,19 +71,25 @@ type Block struct {
// Edge represents a CFG edge.
// Example edges for b branching to either c or d.
// (c and d have other predecessors.)
-// b.Succs = [{c,3}, {d,1}]
-// c.Preds = [?, ?, ?, {b,0}]
-// d.Preds = [?, {b,1}, ?]
+//
+// b.Succs = [{c,3}, {d,1}]
+// c.Preds = [?, ?, ?, {b,0}]
+// d.Preds = [?, {b,1}, ?]
+//
// These indexes allow us to edit the CFG in constant time.
// In addition, it informs phi ops in degenerate cases like:
-// b:
-// if k then c else c
-// c:
-// v = Phi(x, y)
+//
+// b:
+// if k then c else c
+// c:
+// v = Phi(x, y)
+//
// Then the indexes tell you whether x is chosen from
// the if or else branch from b.
-// b.Succs = [{c,0},{c,1}]
-// c.Preds = [{b,0},{b,1}]
+//
+// b.Succs = [{c,0},{c,1}]
+// c.Preds = [{b,0},{b,1}]
+//
// means x is chosen if k is true.
type Edge struct {
// block edge goes to (in a Succs list) or from (in a Preds list)
@@ -106,12 +112,13 @@ func (e Edge) String() string {
}
// BlockKind is the kind of SSA block.
-// kind controls successors
-// ------------------------------------------
-// Exit [return mem] []
-// Plain [] [next]
-// If [boolean Value] [then, else]
-// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
+//
+// kind controls successors
+// ------------------------------------------
+// Exit [return mem] []
+// Plain [] [next]
+// If [boolean Value] [then, else]
+// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
type BlockKind int8
// short form print
@@ -330,10 +337,12 @@ func (b *Block) swapSuccessors() {
//
// b.removePred(i)
// for _, v := range b.Values {
-// if v.Op != OpPhi {
-// continue
-// }
-// b.removeArg(v, i)
+//
+// if v.Op != OpPhi {
+// continue
+// }
+// b.removeArg(v, i)
+//
// }
func (b *Block) removePhiArg(phi *Value, i int) {
n := len(b.Preds)
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index 59773ef31b..7a08654f4e 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -11,11 +11,11 @@ import "cmd/internal/src"
//
// Search for basic blocks that look like
//
-// bb0 bb0
-// | \ / \
-// | bb1 or bb1 bb2 <- trivial if/else blocks
-// | / \ /
-// bb2 bb3
+// bb0 bb0
+// | \ / \
+// | bb1 or bb1 bb2 <- trivial if/else blocks
+// | / \ /
+// bb2 bb3
//
// where the intermediate blocks are mostly empty (with no side-effects);
// rewrite Phis in the postdominator as CondSelects.
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index 28edfd2237..df677e674a 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -100,6 +100,10 @@ func checkFunc(f *Func) {
if b.NumControls() != 0 {
f.Fatalf("plain/dead block %s has a control value", b)
}
+ case BlockJumpTable:
+ if b.NumControls() != 1 {
+ f.Fatalf("jumpTable block %s has no control value", b)
+ }
}
if len(b.Succs) != 2 && b.Likely != BranchUnknown {
f.Fatalf("likeliness prediction %d for block %s with %d successors", b.Likely, b, len(b.Succs))
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index f95140eaf9..5e898ab96f 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -250,8 +250,8 @@ var GenssaDump map[string]bool = make(map[string]bool) // names of functions to
// version is used as a regular expression to match the phase name(s).
//
// Special cases that have turned out to be useful:
-// - ssa/check/on enables checking after each phase
-// - ssa/all/time enables time reporting for all phases
+// - ssa/check/on enables checking after each phase
+// - ssa/all/time enables time reporting for all phases
//
// See gc/lex.go for dissection of the option string.
// Example uses:
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index b9c98bdba9..931ef454fc 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -168,6 +168,9 @@ type Frontend interface {
// MyImportPath provides the import name (roughly, the package) for the function being compiled.
MyImportPath() string
+
+ // LSym returns the linker symbol of the function being compiled.
+ LSym() string
}
// NewConfig returns a new configuration object for the given architecture.
@@ -297,8 +300,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.registers = registersRISCV64[:]
c.gpRegMask = gpRegMaskRISCV64
c.fpRegMask = fpRegMaskRISCV64
- // c.intParamRegs = paramIntRegRISCV64
- // c.floatParamRegs = paramFloatRegRISCV64
+ c.intParamRegs = paramIntRegRISCV64
+ c.floatParamRegs = paramFloatRegRISCV64
c.FPReg = framepointerRegRISCV64
c.hasGReg = true
case "wasm":
@@ -329,8 +332,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.floatParamRegs = nil // no FP registers in softfloat mode
}
- c.ABI0 = abi.NewABIConfig(0, 0, ctxt.FixedFrameSize())
- c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.FixedFrameSize())
+ c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize)
+ c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize)
// On Plan 9, floating point operations are not allowed in note handler.
if buildcfg.GOOS == "plan9" {
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index ade5e0648e..f4b799394c 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -235,14 +235,15 @@ type eqclass []*Value
// partitionValues partitions the values into equivalence classes
// based on having all the following features match:
-// - opcode
-// - type
-// - auxint
-// - aux
-// - nargs
-// - block # if a phi op
-// - first two arg's opcodes and auxint
-// - NOT first two arg's aux; that can break CSE.
+// - opcode
+// - type
+// - auxint
+// - aux
+// - nargs
+// - block # if a phi op
+// - first two arg's opcodes and auxint
+// - NOT first two arg's aux; that can break CSE.
+//
// partitionValues returns a list of equivalence classes, each
// being a sorted by ID list of *Values. The eqclass slices are
// backed by the same storage as the input slice.
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 08dc5c468e..2c18d35204 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -402,28 +402,28 @@ func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
// OpArg{Int,Float}Reg values, inserting additional values in
// cases where they are missing. Example:
//
-// func foo(s string, used int, notused int) int {
-// return len(s) + used
-// }
+// func foo(s string, used int, notused int) int {
+// return len(s) + used
+// }
//
// In the function above, the incoming parameter "used" is fully live,
// "notused" is not live, and "s" is partially live (only the length
// field of the string is used). At the point where debug value
// analysis runs, we might expect to see an entry block with:
//
-// b1:
-// v4 = ArgIntReg {s+8} [0] : BX
-// v5 = ArgIntReg {used} [0] : CX
+// b1:
+// v4 = ArgIntReg {s+8} [0] : BX
+// v5 = ArgIntReg {used} [0] : CX
//
// While this is an accurate picture of the live incoming params,
// we also want to have debug locations for non-live params (or
// their non-live pieces), e.g. something like
//
-// b1:
-// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
-// v4 = ArgIntReg {s+8} [0] : BX
-// v5 = ArgIntReg {used} [0] : CX
-// v10 = ArgIntReg {unused} [0] : DI
+// b1:
+// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
+// v4 = ArgIntReg {s+8} [0] : BX
+// v5 = ArgIntReg {used} [0] : CX
+// v10 = ArgIntReg {unused} [0] : DI
//
// This function examines the live OpArg{Int,Float}Reg values and
// synthesizes new (dead) values for the non-live params or the
@@ -1489,14 +1489,14 @@ func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int)
// that spills a register arg. It returns the ID of that instruction
// Example:
//
-// b1:
-// v3 = ArgIntReg {p1+0} [0] : AX
-// ... more arg regs ..
-// v4 = ArgFloatReg {f1+0} [0] : X0
-// v52 = MOVQstore {p1} v2 v3 v1
-// ... more stores ...
-// v68 = MOVSSstore {f4} v2 v67 v66
-// v38 = MOVQstoreconst {blob} [val=0,off=0] v2 v32
+// b1:
+// v3 = ArgIntReg {p1+0} [0] : AX
+// ... more arg regs ..
+// v4 = ArgFloatReg {f1+0} [0] : X0
+// v52 = MOVQstore {p1} v2 v3 v1
+// ... more stores ...
+// v68 = MOVSSstore {f4} v2 v67 v66
+// v38 = MOVQstoreconst {blob} [val=0,off=0] v2 v32
//
// Important: locatePrologEnd is expected to work properly only with
// optimization turned off (e.g. "-N"). If optimization is enabled
diff --git a/src/cmd/compile/internal/ssa/debug_test.go b/src/cmd/compile/internal/ssa/debug_test.go
index 2fc12557c0..c807863ea6 100644
--- a/src/cmd/compile/internal/ssa/debug_test.go
+++ b/src/cmd/compile/internal/ssa/debug_test.go
@@ -84,7 +84,7 @@ var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gog
// "O" is an explicit indication that we expect it to be optimized out.
// For example:
//
-// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
+// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
//
// TODO: not implemented for Delve yet, but this is the plan
//
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index a3cea855f2..90ea2d5040 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -656,15 +656,16 @@ outer:
// It decomposes a Load or an Arg into smaller parts and returns the new mem.
// If the type does not match one of the expected aggregate types, it returns nil instead.
// Parameters:
-// pos -- the location of any generated code.
-// b -- the block into which any generated code should normally be placed
-// source -- the value, possibly an aggregate, to be stored.
-// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
-// t -- the type of the value to be stored
-// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
-// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
-// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
-// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
+//
+// pos -- the location of any generated code.
+// b -- the block into which any generated code should normally be placed
+// source -- the value, possibly an aggregate, to be stored.
+// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
+// t -- the type of the value to be stored
+// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
+// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
+// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
+// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
pa := x.prAssignForArg(source)
@@ -777,15 +778,16 @@ func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off
// It decomposes a Load into smaller parts and returns the new mem.
// If the type does not match one of the expected aggregate types, it returns nil instead.
// Parameters:
-// pos -- the location of any generated code.
-// b -- the block into which any generated code should normally be placed
-// source -- the value, possibly an aggregate, to be stored.
-// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
-// t -- the type of the value to be stored
-// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
-// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
-// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
-// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
+//
+// pos -- the location of any generated code.
+// b -- the block into which any generated code should normally be placed
+// source -- the value, possibly an aggregate, to be stored.
+// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
+// t -- the type of the value to be stored
+// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
+// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
+// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
+// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
//
// TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
@@ -1106,7 +1108,7 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
a0 := a.Args[0]
if a0.Op == OpLocalAddr {
n := a0.Aux.(*ir.Name)
- if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.FixedFrameSize() == aOffset {
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
continue
}
}
@@ -1127,7 +1129,7 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
// It's common for a tail call passing the same arguments (e.g. method wrapper),
// so this would be a self copy. Detect this and optimize it out.
n := a.Aux.(*ir.Name)
- if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.FixedFrameSize() == aOffset {
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
continue
}
}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index c4e87ec7d0..87d1b41419 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -102,6 +102,9 @@ func (d TestFrontend) Debug_checknil() bool { retu
func (d TestFrontend) MyImportPath() string {
return "my/import/path"
}
+func (d TestFrontend) LSym() string {
+ return "my/import/path.function"
+}
var testTypes Types
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 0b5392f0f0..35a9382663 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -820,17 +820,22 @@ func (f *Func) invalidateCFG() {
}
// DebugHashMatch reports whether environment variable evname
-// 1) is empty (this is a special more-quickly implemented case of 3)
-// 2) is "y" or "Y"
-// 3) is a suffix of the sha1 hash of name
-// 4) is a suffix of the environment variable
+// 1. is empty (this is a special more-quickly implemented case of 3)
+// 2. is "y" or "Y"
+// 3. is a suffix of the sha1 hash of name
+// 4. is a suffix of the environment variable
// fmt.Sprintf("%s%d", evname, n)
// provided that all such variables are nonempty for 0 <= i <= n
+//
// Otherwise it returns false.
// When true is returned the message
-// "%s triggered %s\n", evname, name
+//
+// "%s triggered %s\n", evname, name
+//
// is printed on the file named in environment variable
-// GSHS_LOGFILE
+//
+// GSHS_LOGFILE
+//
// or standard out if that is empty or there is an error
// opening the file.
func (f *Func) DebugHashMatch(evname string) bool {
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index fec2ba8773..2b176dfa7b 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -55,19 +55,21 @@ func fuse(f *Func, typ fuseType) {
// fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
//
-// b b b b
-// \ / \ / | \ / \ / | | |
-// s0 s1 | s1 s0 | | |
-// \ / | / \ | | |
-// ss ss ss ss
+// b b b b
+// \ / \ / | \ / \ / | | |
+// s0 s1 | s1 s0 | | |
+// \ / | / \ | | |
+// ss ss ss ss
//
// If all Phi ops in ss have identical variables for slots corresponding to
// s0, s1 and b then the branch can be dropped.
// This optimization often comes up in switch statements with multiple
// expressions in a case clause:
-// switch n {
-// case 1,2,3: return 4
-// }
+//
+// switch n {
+// case 1,2,3: return 4
+// }
+//
// TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway.
func fuseBlockIf(b *Block) bool {
if b.Kind != BlockIf {
diff --git a/src/cmd/compile/internal/ssa/fuse_branchredirect.go b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
index 27449db55a..59570968a2 100644
--- a/src/cmd/compile/internal/ssa/fuse_branchredirect.go
+++ b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
@@ -8,21 +8,24 @@ package ssa
// of an If block can be derived from its predecessor If block, in
// some such cases, we can redirect the predecessor If block to the
// corresponding successor block directly. For example:
-// p:
-// v11 = Less64 v10 v8
-// If v11 goto b else u
-// b: <- p ...
-// v17 = Leq64 v10 v8
-// If v17 goto s else o
+//
+// p:
+// v11 = Less64 v10 v8
+// If v11 goto b else u
+// b: <- p ...
+// v17 = Leq64 v10 v8
+// If v17 goto s else o
+//
// We can redirect p to s directly.
//
// The implementation here borrows the framework of the prove pass.
-// 1, Traverse all blocks of function f to find If blocks.
-// 2, For any If block b, traverse all its predecessors to find If blocks.
-// 3, For any If block predecessor p, update relationship p->b.
-// 4, Traverse all successors of b.
-// 5, For any successor s of b, try to update relationship b->s, if a
-// contradiction is found then redirect p to another successor of b.
+//
+// 1, Traverse all blocks of function f to find If blocks.
+// 2, For any If block b, traverse all its predecessors to find If blocks.
+// 3, For any If block predecessor p, update relationship p->b.
+// 4, Traverse all successors of b.
+// 5, For any successor s of b, try to update relationship b->s, if a
+// contradiction is found then redirect p to another successor of b.
func fuseBranchRedirect(f *Func) bool {
ft := newFactsTable(f)
ft.checkpoint()
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
index d843fc3fda..f5fb84b0d7 100644
--- a/src/cmd/compile/internal/ssa/fuse_comparisons.go
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -9,22 +9,22 @@ package ssa
//
// Look for branch structure like:
//
-// p
-// |\
-// | b
-// |/ \
-// s0 s1
+// p
+// |\
+// | b
+// |/ \
+// s0 s1
//
// In our example, p has control '1 <= x', b has control 'x < 5',
// and s0 and s1 are the if and else results of the comparison.
//
// This will be optimized into:
//
-// p
-// \
-// b
-// / \
-// s0 s1
+// p
+// \
+// b
+// / \
+// s0 s1
//
// where b has the combined control value 'unsigned(x-1) < 4'.
// Later passes will then fuse p and b.
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index d50bdf2a17..81fdebaf49 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -206,6 +206,11 @@
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)
+// Prefer SARX/SHLX/SHRX instruction because it has less register restriction on the shift input.
+(SAR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SARX(Q|L) x y)
+(SHL(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHLX(Q|L) x y)
+(SHR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHRX(Q|L) x y)
+
// Lowering integer comparisons
(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
@@ -512,6 +517,8 @@
(If cond yes no) => (NE (TESTB cond cond) yes no)
+(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ {makeJumpTableSym(b)} (SB)))
+
// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
@@ -590,6 +597,8 @@
// mutandis, for UGE and SETAE, and CC and SETCC.
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTL (SHLXL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLXQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
=> ((ULT|UGE) (BTLconst [int8(log32(c))] x))
((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
@@ -598,6 +607,8 @@
=> ((ULT|UGE) (BTQconst [int8(log64(c))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTL (SHLXL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLXQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
=> (SET(B|AE) (BTLconst [int8(log32(c))] x))
(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
@@ -609,6 +620,10 @@
=> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
=> (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
@@ -621,9 +636,10 @@
(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x)
(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
+(BT(Q|L)const [0] s:(SHRXQ x y)) => (BTQ y x)
(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x)
-(BTLconst [0] s:(SHRL x y)) => (BTL y x)
+(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x)
// Rewrite a & 1 != 1 into a & 1 == 0.
// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
@@ -635,6 +651,8 @@
// Recognize bit setting (a |= 1< (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
+(OR(Q|L) (SHLX(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
+(XOR(Q|L) (SHLX(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
// Convert ORconst into BTS, if the code gets smaller, with boundary being
// (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
@@ -650,6 +668,8 @@
// Recognize bit clearing: a &^= 1< (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
+(AND(Q|L) (NOT(Q|L) (SHLX(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
+(ANDN(Q|L) x (SHLX(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRQconst [int8(log32(^c))] x)
(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
@@ -791,6 +811,8 @@
(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
+(SHLXQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
+(SHLXL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
@@ -798,33 +820,36 @@
(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])
+(SHRXQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
+(SHRXL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)
-
+(SARXQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
+(SARXL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
-((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
-((SHLQ|SHRQ|SARQ) x (NEGQ (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ y))
-((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
-((SHLQ|SHRQ|SARQ) x (NEGQ (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ y))
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ y))
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ y))
-((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
-((SHLL|SHRL|SARL) x (NEGQ (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ y))
-((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
-((SHLL|SHRL|SARL) x (NEGQ (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ y))
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ y))
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ y))
-((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
-((SHLQ|SHRQ|SARQ) x (NEGL (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL y))
-((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
-((SHLQ|SHRQ|SARQ) x (NEGL (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL y))
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL y))
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
+((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL y))
-((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
-((SHLL|SHRL|SARL) x (NEGL (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL y))
-((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
-((SHLL|SHRL|SARL) x (NEGL (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL y))
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL y))
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
+((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL y))
// Constant rotate instructions
((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c])
@@ -856,9 +881,13 @@
// it in order to strip it out.
(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)
+(ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
+(ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)
(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)
+(ORL (SHLXL x y) (ANDL (SHRXL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
+(ORL (SHRXL x y) (ANDL (SHLXL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)
// Help with rotate detection
(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT)
@@ -873,6 +902,15 @@
(SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
&& v.Type.Size() == 2
=> (RORW x y)
+(ORL (SHLXL x (AND(Q|L)const y [15]))
+ (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
+ (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
+ && v.Type.Size() == 2
+ => (ROLW x y)
+(ORL (SHRW x (AND(Q|L)const y [15]))
+ (SHLXL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
+ && v.Type.Size() == 2
+ => (RORW x y)
(ORL (SHLL x (AND(Q|L)const y [ 7]))
(ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
@@ -883,6 +921,15 @@
(SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
&& v.Type.Size() == 1
=> (RORB x y)
+(ORL (SHLXL x (AND(Q|L)const y [ 7]))
+ (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
+ (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
+ && v.Type.Size() == 1
+ => (ROLB x y)
+(ORL (SHRB x (AND(Q|L)const y [ 7]))
+ (SHLXL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
+ && v.Type.Size() == 1
+ => (RORB x y)
// rotate left negative = rotate right
(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
@@ -916,6 +963,7 @@
// Multi-register shifts
(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
+(ORQ (SH(R|L)XQ lo bits) (SH(L|R)XQ hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
@@ -2252,5 +2300,10 @@
&& clobber(x0, x1, sh)
=> @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
-(SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
-(SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
+(SARX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
+(SHLX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
+(SHRX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
+
+((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index d760d7d79e..fc42fa5e28 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -937,13 +937,41 @@ func init() {
{name: "MOVBELstore", argLength: 3, reg: gpstore, asm: "MOVBEL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVBEQload", argLength: 2, reg: gpload, asm: "MOVBEQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVBEQstore", argLength: 3, reg: gpstore, asm: "MOVBEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ // indexed MOVBE loads
+ {name: "MOVBELloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBEL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBELloadidx4", argLength: 3, reg: gploadidx, asm: "MOVBEL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBELloadidx8", argLength: 3, reg: gploadidx, asm: "MOVBEL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+8*arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBEQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBEQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load and swap 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVBEQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVBEQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load and swap 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
+ // indexed MOVBE stores
+ {name: "MOVBEWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEW", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVBEWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVBEW", scale: 2, aux: "SymOff", symEffect: "Write"}, // swap and store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVBELstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEL", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVBELstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVBEL", scale: 4, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ {name: "MOVBELstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVBEL", scale: 8, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+ {name: "MOVBEQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEQ", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVBEQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVBEQ", scale: 8, aux: "SymOff", symEffect: "Write"}, // swap and store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
// CPUID feature: BMI2.
+ {name: "SARXQ", argLength: 2, reg: gp21, asm: "SARXQ"}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SARXL", argLength: 2, reg: gp21, asm: "SARXL"}, // signed int32(arg0) >> arg1, shift amount is mod 32
+ {name: "SHLXQ", argLength: 2, reg: gp21, asm: "SHLXQ"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SHLXL", argLength: 2, reg: gp21, asm: "SHLXL"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHRXQ", argLength: 2, reg: gp21, asm: "SHRXQ"}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SHRXL", argLength: 2, reg: gp21, asm: "SHRXL"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 32
+
+ {name: "SARXLload", argLength: 3, reg: gp21shxload, asm: "SARXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 32
+ {name: "SARXQload", argLength: 3, reg: gp21shxload, asm: "SARXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 64
{name: "SHLXLload", argLength: 3, reg: gp21shxload, asm: "SHLXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+auxint+aux) << arg1, arg2=mem, shift amount is mod 32
{name: "SHLXQload", argLength: 3, reg: gp21shxload, asm: "SHLXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+auxint+aux) << arg1, arg2=mem, shift amount is mod 64
{name: "SHRXLload", argLength: 3, reg: gp21shxload, asm: "SHRXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 32
{name: "SHRXQload", argLength: 3, reg: gp21shxload, asm: "SHRXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 64
+ {name: "SARXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SARXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+4*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SARXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SARXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SARXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64
+ {name: "SARXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SARXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64
{name: "SHLXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+1*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32
{name: "SHLXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+4*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32
{name: "SHLXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+8*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32
@@ -973,6 +1001,12 @@ func init() {
{name: "NEF", controls: 1},
{name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
{name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+
+ // JUMPTABLE implements jump tables.
+ // Aux is the symbol (an *obj.LSym) for the jump table.
+ // control[0] is the index into the jump table.
+ // control[1] is the address of the jump table (the address of the symbol stored in Aux).
+ {name: "JUMPTABLE", controls: 2, aux: "Sym"},
}
archs = append(archs, arch{
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 6dbe9b47d0..d5cc107fab 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -515,15 +515,18 @@
// simplifications
(Or(64|32|16|8) x x) => x
-(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1])
+(Or(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
(And(64|32|16|8) x x) => x
(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x
-(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+(And(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [0])
(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0])
(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Xor(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0])
@@ -533,6 +536,13 @@
(Com(64|32|16|8) (Const(64|32|16|8) [c])) => (Const(64|32|16|8) [^c])
(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x)
+(Add(64|32|16|8) x (Neg(64|32|16|8) y)) => (Sub(64|32|16|8) x y)
+
+(Xor(64|32|16|8) (Const(64|32|16|8) [-1]) x) => (Com(64|32|16|8) x)
+
+(Sub(64|32|16|8) (Neg(64|32|16|8) x) (Com(64|32|16|8) x)) => (Const(64|32|16|8) [1])
+(Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1])
+(Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
// ^(x-1) == ^x+1 == -x
(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 4f133b1ff6..e04b7db6e7 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -639,12 +639,13 @@ var genericOps = []opData{
// First [] [always, never]
var genericBlocks = []blockData{
- {name: "Plain"}, // a single successor
- {name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
- {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
- {name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
- {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is a tail call
- {name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
+ {name: "Plain"}, // a single successor
+ {name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
+ {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+ {name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
+ {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is a tail call
+ {name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
+ {name: "JumpTable", controls: 1}, // multiple successors, the integer Controls[0] selects which one
// transient block state used for dead code removal
{name: "First"}, // 2 successors, always takes the first one (second is dead)
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index fe8db4ed1f..0f7e970372 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -1838,6 +1838,8 @@ func (op opData) auxIntType() string {
// auxType returns the Go type that this block should store in its aux field.
func (b blockData) auxType() string {
switch b.aux {
+ case "Sym":
+ return "Sym"
case "S390XCCMask", "S390XCCMaskInt8", "S390XCCMaskUint8":
return "s390x.CCMask"
case "S390XRotateParams":
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index d69db404ed..00aea87936 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -46,19 +46,19 @@ func (r *Register) GCNum() int16 {
// variable that has been decomposed into multiple stack slots.
// As an example, a string could have the following configurations:
//
-// stack layout LocalSlots
+// stack layout LocalSlots
//
-// Optimizations are disabled. s is on the stack and represented in its entirety.
-// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
+// Optimizations are disabled. s is on the stack and represented in its entirety.
+// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
//
-// s was not decomposed, but the SSA operates on its parts individually, so
-// there is a LocalSlot for each of its fields that points into the single stack slot.
-// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
+// s was not decomposed, but the SSA operates on its parts individually, so
+// there is a LocalSlot for each of its fields that points into the single stack slot.
+// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
//
-// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot.
-// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
-// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
-// parent = &{N: s, Type: string}
+// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSlot.
+// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
+// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
+// parent = &{N: s, Type: string}
type LocalSlot struct {
N *ir.Name // an ONAME *ir.Name representing a stack location.
Type *types.Type // type of slot
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
index 206aab2c5e..dd63541771 100644
--- a/src/cmd/compile/internal/ssa/loopbce.go
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -31,9 +31,10 @@ type indVar struct {
// parseIndVar checks whether the SSA value passed as argument is a valid induction
// variable, and, if so, extracts:
-// * the minimum bound
-// * the increment value
-// * the "next" value (SSA value that is Phi'd into the induction variable every loop)
+// - the minimum bound
+// - the increment value
+// - the "next" value (SSA value that is Phi'd into the induction variable every loop)
+//
// Currently, we detect induction variables that match (Phi min nxt),
// with nxt being (Add inc ind).
// If it can't parse the induction variable correctly, it returns (nil, nil, nil).
@@ -66,19 +67,18 @@ func parseIndVar(ind *Value) (min, inc, nxt *Value) {
//
// Look for variables and blocks that satisfy the following
//
-// loop:
-// ind = (Phi min nxt),
-// if ind < max
-// then goto enter_loop
-// else goto exit_loop
+// loop:
+// ind = (Phi min nxt),
+// if ind < max
+// then goto enter_loop
+// else goto exit_loop
//
-// enter_loop:
-// do something
-// nxt = inc + ind
-// goto loop
-//
-// exit_loop:
+// enter_loop:
+// do something
+// nxt = inc + ind
+// goto loop
//
+// exit_loop:
//
// TODO: handle 32 bit operations
func findIndVar(f *Func) []indVar {
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
index 35010a78d8..2eefda1c8b 100644
--- a/src/cmd/compile/internal/ssa/looprotate.go
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -8,19 +8,19 @@ package ssa
// to loops with a check-loop-condition-at-end.
// This helps loops avoid extra unnecessary jumps.
//
-// loop:
-// CMPQ ...
-// JGE exit
-// ...
-// JMP loop
-// exit:
+// loop:
+// CMPQ ...
+// JGE exit
+// ...
+// JMP loop
+// exit:
//
-// JMP entry
-// loop:
-// ...
-// entry:
-// CMPQ ...
-// JLT loop
+// JMP entry
+// loop:
+// ...
+// entry:
+// CMPQ ...
+// JLT loop
func loopRotate(f *Func) {
loopnest := f.loopnest()
if loopnest.hasIrreducible {
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
index 93f8801bce..e903d92bb6 100644
--- a/src/cmd/compile/internal/ssa/magic.go
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -110,7 +110,8 @@ type umagicData struct {
// umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c).
// The return values satisfy for all 0 <= x < 2^n
-// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
+//
+// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
func umagic(n uint, c int64) umagicData {
// Convert from ConstX auxint values to the real uint64 constant they represent.
d := uint64(c) << (64 - n) >> (64 - n)
@@ -183,7 +184,8 @@ type smagicData struct {
// magic computes the constants needed to strength reduce signed n-bit divides by the constant c.
// Must have c>0.
// The return values satisfy for all -2^(n-1) <= x < 2^(n-1)
-// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
+//
+// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
func smagic(n uint, c int64) smagicData {
C := new(big.Int).SetInt64(c)
s := C.BitLen() - 1
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index a1835dcd30..a3e8dcd2f6 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -391,9 +391,9 @@ const (
// A Sym represents a symbolic offset from a base register.
// Currently a Sym can be one of 3 things:
-// - a *gc.Node, for an offset from SP (the stack pointer)
-// - a *obj.LSym, for an offset from SB (the global pointer)
-// - nil, for no offset
+// - a *gc.Node, for an offset from SP (the stack pointer)
+// - a *obj.LSym, for an offset from SB (the global pointer)
+// - nil, for no offset
type Sym interface {
CanBeAnSSASym()
CanBeAnSSAAux()
@@ -479,12 +479,13 @@ const (
)
// boundsAPI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
-// CMPQ c, cap
-// JA fail1
-// CMPQ b, c
-// JA fail2
-// CMPQ a, b
-// JA fail3
+//
+// CMPQ c, cap
+// JA fail1
+// CMPQ b, c
+// JA fail2
+// CMPQ a, b
+// JA fail3
//
// fail1: CALL panicSlice3Acap (c, cap)
// fail2: CALL panicSlice3B (b, c)
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 005a033a40..0357fdb12a 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -50,6 +50,7 @@ const (
BlockAMD64NEF
BlockAMD64ORD
BlockAMD64NAN
+ BlockAMD64JUMPTABLE
BlockARMEQ
BlockARMNE
@@ -149,6 +150,7 @@ const (
BlockRet
BlockRetJmp
BlockExit
+ BlockJumpTable
BlockFirst
)
@@ -172,22 +174,23 @@ var blockString = [...]string{
Block386ORD: "ORD",
Block386NAN: "NAN",
- BlockAMD64EQ: "EQ",
- BlockAMD64NE: "NE",
- BlockAMD64LT: "LT",
- BlockAMD64LE: "LE",
- BlockAMD64GT: "GT",
- BlockAMD64GE: "GE",
- BlockAMD64OS: "OS",
- BlockAMD64OC: "OC",
- BlockAMD64ULT: "ULT",
- BlockAMD64ULE: "ULE",
- BlockAMD64UGT: "UGT",
- BlockAMD64UGE: "UGE",
- BlockAMD64EQF: "EQF",
- BlockAMD64NEF: "NEF",
- BlockAMD64ORD: "ORD",
- BlockAMD64NAN: "NAN",
+ BlockAMD64EQ: "EQ",
+ BlockAMD64NE: "NE",
+ BlockAMD64LT: "LT",
+ BlockAMD64LE: "LE",
+ BlockAMD64GT: "GT",
+ BlockAMD64GE: "GE",
+ BlockAMD64OS: "OS",
+ BlockAMD64OC: "OC",
+ BlockAMD64ULT: "ULT",
+ BlockAMD64ULE: "ULE",
+ BlockAMD64UGT: "UGT",
+ BlockAMD64UGE: "UGE",
+ BlockAMD64EQF: "EQF",
+ BlockAMD64NEF: "NEF",
+ BlockAMD64ORD: "ORD",
+ BlockAMD64NAN: "NAN",
+ BlockAMD64JUMPTABLE: "JUMPTABLE",
BlockARMEQ: "EQ",
BlockARMNE: "NE",
@@ -281,13 +284,14 @@ var blockString = [...]string{
BlockS390XCLIJ: "CLIJ",
BlockS390XCLGIJ: "CLGIJ",
- BlockPlain: "Plain",
- BlockIf: "If",
- BlockDefer: "Defer",
- BlockRet: "Ret",
- BlockRetJmp: "RetJmp",
- BlockExit: "Exit",
- BlockFirst: "First",
+ BlockPlain: "Plain",
+ BlockIf: "If",
+ BlockDefer: "Defer",
+ BlockRet: "Ret",
+ BlockRetJmp: "RetJmp",
+ BlockExit: "Exit",
+ BlockJumpTable: "JumpTable",
+ BlockFirst: "First",
}
func (k BlockKind) String() string { return blockString[k] }
@@ -1050,10 +1054,35 @@ const (
OpAMD64MOVBELstore
OpAMD64MOVBEQload
OpAMD64MOVBEQstore
+ OpAMD64MOVBELloadidx1
+ OpAMD64MOVBELloadidx4
+ OpAMD64MOVBELloadidx8
+ OpAMD64MOVBEQloadidx1
+ OpAMD64MOVBEQloadidx8
+ OpAMD64MOVBEWstoreidx1
+ OpAMD64MOVBEWstoreidx2
+ OpAMD64MOVBELstoreidx1
+ OpAMD64MOVBELstoreidx4
+ OpAMD64MOVBELstoreidx8
+ OpAMD64MOVBEQstoreidx1
+ OpAMD64MOVBEQstoreidx8
+ OpAMD64SARXQ
+ OpAMD64SARXL
+ OpAMD64SHLXQ
+ OpAMD64SHLXL
+ OpAMD64SHRXQ
+ OpAMD64SHRXL
+ OpAMD64SARXLload
+ OpAMD64SARXQload
OpAMD64SHLXLload
OpAMD64SHLXQload
OpAMD64SHRXLload
OpAMD64SHRXQload
+ OpAMD64SARXLloadidx1
+ OpAMD64SARXLloadidx4
+ OpAMD64SARXLloadidx8
+ OpAMD64SARXQloadidx1
+ OpAMD64SARXQloadidx8
OpAMD64SHLXLloadidx1
OpAMD64SHLXLloadidx4
OpAMD64SHLXLloadidx8
@@ -13910,6 +13939,319 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "MOVBELloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEQloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEQloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVBEQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEQstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SARXQ",
+ argLen: 2,
+ asm: x86.ASARXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXL",
+ argLen: 2,
+ asm: x86.ASARXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXQ",
+ argLen: 2,
+ asm: x86.ASHLXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXL",
+ argLen: 2,
+ asm: x86.ASHLXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXQ",
+ argLen: 2,
+ asm: x86.ASHRXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXL",
+ argLen: 2,
+ asm: x86.ASHRXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
{
name: "SHLXLload",
auxType: auxSymOff,
@@ -13978,6 +14320,101 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "SARXLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
{
name: "SHLXLloadidx1",
auxType: auxSymOff,
diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go
index 761cb7a392..4fc942375f 100644
--- a/src/cmd/compile/internal/ssa/phielim.go
+++ b/src/cmd/compile/internal/ssa/phielim.go
@@ -8,13 +8,19 @@ package ssa
// A phi is redundant if its arguments are all equal. For
// purposes of counting, ignore the phi itself. Both of
// these phis are redundant:
-// v = phi(x,x,x)
-// v = phi(x,v,x,v)
+//
+// v = phi(x,x,x)
+// v = phi(x,v,x,v)
+//
// We repeat this process to also catch situations like:
-// v = phi(x, phi(x, x), phi(x, v))
+//
+// v = phi(x, phi(x, x), phi(x, v))
+//
// TODO: Can we also simplify cases like:
-// v = phi(v, w, x)
-// w = phi(v, w, x)
+//
+// v = phi(v, w, x)
+// w = phi(v, w, x)
+//
// and would that be useful?
func phielim(f *Func) {
for {
diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go
index 0357442ae9..037845eacf 100644
--- a/src/cmd/compile/internal/ssa/phiopt.go
+++ b/src/cmd/compile/internal/ssa/phiopt.go
@@ -7,20 +7,22 @@ package ssa
// phiopt eliminates boolean Phis based on the previous if.
//
// Main use case is to transform:
-// x := false
-// if b {
-// x = true
-// }
+//
+// x := false
+// if b {
+// x = true
+// }
+//
// into x = b.
//
// In SSA code this appears as
//
-// b0
-// If b -> b1 b2
-// b1
-// Plain -> b2
-// b2
-// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+// b0
+// If b -> b1 b2
+// b1
+// Plain -> b2
+// b2
+// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
//
// In this case we can replace x with a copy of b.
func phiopt(f *Func) {
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index 200106e66d..a3b4f0fea4 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -140,11 +140,11 @@ type posetNode struct {
// to record that A= w {
-// newR := r & (eq|gt)
-// }
-// if v != w {
-// newR := r & (lt|gt)
-// }
+// if v < w {
+// newR := r & lt
+// }
+// if v >= w {
+// newR := r & (eq|gt)
+// }
+// if v != w {
+// newR := r & (lt|gt)
+// }
type relation uint
const (
@@ -746,19 +750,19 @@ func (ft *factsTable) cleanup(f *Func) {
// By far, the most common redundant pair are generated by bounds checking.
// For example for the code:
//
-// a[i] = 4
-// foo(a[i])
+// a[i] = 4
+// foo(a[i])
//
// The compiler will generate the following code:
//
-// if i >= len(a) {
-// panic("not in bounds")
-// }
-// a[i] = 4
-// if i >= len(a) {
-// panic("not in bounds")
-// }
-// foo(a[i])
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// a[i] = 4
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// foo(a[i])
//
// The second comparison i >= len(a) is clearly redundant because if the
// else branch of the first comparison is executed, we already know that i < len(a).
@@ -940,20 +944,31 @@ func prove(f *Func) {
// getBranch returns the range restrictions added by p
// when reaching b. p is the immediate dominator of b.
func getBranch(sdom SparseTree, p *Block, b *Block) branch {
- if p == nil || p.Kind != BlockIf {
+ if p == nil {
return unknown
}
- // If p and p.Succs[0] are dominators it means that every path
- // from entry to b passes through p and p.Succs[0]. We care that
- // no path from entry to b passes through p.Succs[1]. If p.Succs[0]
- // has one predecessor then (apart from the degenerate case),
- // there is no path from entry that can reach b through p.Succs[1].
- // TODO: how about p->yes->b->yes, i.e. a loop in yes.
- if sdom.IsAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 {
- return positive
- }
- if sdom.IsAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 {
- return negative
+ switch p.Kind {
+ case BlockIf:
+ // If p and p.Succs[0] are dominators it means that every path
+ // from entry to b passes through p and p.Succs[0]. We care that
+ // no path from entry to b passes through p.Succs[1]. If p.Succs[0]
+ // has one predecessor then (apart from the degenerate case),
+ // there is no path from entry that can reach b through p.Succs[1].
+ // TODO: how about p->yes->b->yes, i.e. a loop in yes.
+ if sdom.IsAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 {
+ return positive
+ }
+ if sdom.IsAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 {
+ return negative
+ }
+ case BlockJumpTable:
+ // TODO: this loop can lead to quadratic behavior, as
+ // getBranch can be called len(p.Succs) times.
+ for i, e := range p.Succs {
+ if sdom.IsAncestorEq(e.b, b) && len(e.b.Preds) == 1 {
+ return jumpTable0 + branch(i)
+ }
+ }
}
return unknown
}
@@ -984,11 +999,36 @@ func addIndVarRestrictions(ft *factsTable, b *Block, iv indVar) {
// branching from Block b in direction br.
func addBranchRestrictions(ft *factsTable, b *Block, br branch) {
c := b.Controls[0]
- switch br {
- case negative:
+ switch {
+ case br == negative:
addRestrictions(b, ft, boolean, nil, c, eq)
- case positive:
+ case br == positive:
addRestrictions(b, ft, boolean, nil, c, lt|gt)
+ case br >= jumpTable0:
+ idx := br - jumpTable0
+ val := int64(idx)
+ if v, off := isConstDelta(c); v != nil {
+ // Establish the bound on the underlying value we're switching on,
+ // not on the offset-ed value used as the jump table index.
+ c = v
+ val -= off
+ }
+ old, ok := ft.limits[c.ID]
+ if !ok {
+ old = noLimit
+ }
+ ft.limitStack = append(ft.limitStack, limitFact{c.ID, old})
+ if val < old.min || val > old.max || uint64(val) < old.umin || uint64(val) > old.umax {
+ ft.unsat = true
+ if b.Func.pass.debug > 2 {
+ b.Func.Warnl(b.Pos, "block=%s outedge=%d %s=%d unsat", b, idx, c, val)
+ }
+ } else {
+ ft.limits[c.ID] = limit{val, val, uint64(val), uint64(val)}
+ if b.Func.pass.debug > 2 {
+ b.Func.Warnl(b.Pos, "block=%s outedge=%d %s=%d", b, idx, c, val)
+ }
+ }
default:
panic("unknown branch")
}
@@ -1343,10 +1383,14 @@ func removeBranch(b *Block, branch branch) {
// attempt to preserve statement marker.
b.Pos = b.Pos.WithIsStmt()
}
- b.Kind = BlockFirst
- b.ResetControls()
- if branch == positive {
- b.swapSuccessors()
+ if branch == positive || branch == negative {
+ b.Kind = BlockFirst
+ b.ResetControls()
+ if branch == positive {
+ b.swapSuccessors()
+ }
+ } else {
+ // TODO: figure out how to remove an entry from a jump table
}
}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index eb8fa0c02a..4d615a064d 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -962,8 +963,9 @@ found:
// clobber invalidates values. Returns true.
// clobber is used by rewrite rules to:
-// A) make sure the values are really dead and never used again.
-// B) decrement use counts of the values' args.
+//
+// A) make sure the values are really dead and never used again.
+// B) decrement use counts of the values' args.
func clobber(vv ...*Value) bool {
for _, v := range vv {
v.reset(OpInvalid)
@@ -985,7 +987,9 @@ func clobberIfDead(v *Value) bool {
// noteRule is an easy way to track if a rule is matched when writing
// new ones. Make the rule of interest also conditional on
-// noteRule("note to self: rule of interest matched")
+//
+// noteRule("note to self: rule of interest matched")
+//
// and that message will print when the rule matches.
func noteRule(s string) bool {
fmt.Println(s)
@@ -1789,9 +1793,11 @@ func sequentialAddresses(x, y *Value, n int64) bool {
// We happen to match the semantics to those of arm/arm64.
// Note that these semantics differ from x86: the carry flag has the opposite
// sense on a subtraction!
-// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
-// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
-// (because it does x + ^y + C).
+//
+// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
+// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
+// (because it does x + ^y + C).
+//
// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
type flagConstant uint8
@@ -1949,3 +1955,9 @@ func logicFlags32(x int32) flagConstant {
fcb.N = x < 0
return fcb.encode()
}
+
+func makeJumpTableSym(b *Block) *obj.LSym {
+ s := base.Ctxt.Lookup(fmt.Sprintf("%s.jump%d", b.Func.fe.LSym(), b.ID))
+ s.Set(obj.AttrDuplicateOK, true)
+ return s
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index addfaaa3a8..36e69781a5 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -382,6 +382,14 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64SARW(v)
case OpAMD64SARWconst:
return rewriteValueAMD64_OpAMD64SARWconst(v)
+ case OpAMD64SARXL:
+ return rewriteValueAMD64_OpAMD64SARXL(v)
+ case OpAMD64SARXLload:
+ return rewriteValueAMD64_OpAMD64SARXLload(v)
+ case OpAMD64SARXQ:
+ return rewriteValueAMD64_OpAMD64SARXQ(v)
+ case OpAMD64SARXQload:
+ return rewriteValueAMD64_OpAMD64SARXQload(v)
case OpAMD64SBBLcarrymask:
return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
case OpAMD64SBBQ:
@@ -438,6 +446,14 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64SHLQ(v)
case OpAMD64SHLQconst:
return rewriteValueAMD64_OpAMD64SHLQconst(v)
+ case OpAMD64SHLXL:
+ return rewriteValueAMD64_OpAMD64SHLXL(v)
+ case OpAMD64SHLXLload:
+ return rewriteValueAMD64_OpAMD64SHLXLload(v)
+ case OpAMD64SHLXQ:
+ return rewriteValueAMD64_OpAMD64SHLXQ(v)
+ case OpAMD64SHLXQload:
+ return rewriteValueAMD64_OpAMD64SHLXQload(v)
case OpAMD64SHRB:
return rewriteValueAMD64_OpAMD64SHRB(v)
case OpAMD64SHRBconst:
@@ -454,6 +470,14 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64SHRW(v)
case OpAMD64SHRWconst:
return rewriteValueAMD64_OpAMD64SHRWconst(v)
+ case OpAMD64SHRXL:
+ return rewriteValueAMD64_OpAMD64SHRXL(v)
+ case OpAMD64SHRXLload:
+ return rewriteValueAMD64_OpAMD64SHRXLload(v)
+ case OpAMD64SHRXQ:
+ return rewriteValueAMD64_OpAMD64SHRXQ(v)
+ case OpAMD64SHRXQload:
+ return rewriteValueAMD64_OpAMD64SHRXQload(v)
case OpAMD64SUBL:
return rewriteValueAMD64_OpAMD64SUBL(v)
case OpAMD64SUBLconst:
@@ -2700,6 +2724,29 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
}
break
}
+ // match: (ANDL (NOTL (SHLXL (MOVLconst [1]) y)) x)
+ // result: (BTRL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTL {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ANDL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
// result: (BTRLconst [int8(log32(^c))] x)
@@ -3117,6 +3164,22 @@ func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (ANDNL x (SHLXL (MOVLconst [1]) y))
+ // result: (BTRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLXL {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
@@ -3138,6 +3201,22 @@ func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (ANDNQ x (SHLXQ (MOVQconst [1]) y))
+ // result: (BTRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLXQ {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
@@ -3166,6 +3245,29 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
break
}
+ // match: (ANDQ (NOTQ (SHLXQ (MOVQconst [1]) y)) x)
+ // result: (BTRQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTQ {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ANDQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
// result: (BTRQconst [int8(log64(^c))] x)
@@ -3869,6 +3971,22 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
v.AddArg2(y, x)
return true
}
+ // match: (BTLconst [0] s:(SHRXQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
// match: (BTLconst [c] (SHRLconst [d] x))
// cond: (c+d)<32
// result: (BTLconst [c+d] x)
@@ -3921,6 +4039,22 @@ func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
v.AddArg2(y, x)
return true
}
+ // match: (BTLconst [0] s:(SHRXL x y))
+ // result: (BTL y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTL)
+ v.AddArg2(y, x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
@@ -3977,6 +4111,22 @@ func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
v.AddArg2(y, x)
return true
}
+ // match: (BTQconst [0] s:(SHRXQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
@@ -15886,6 +16036,25 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
break
}
+ // match: (ORL (SHLXL (MOVLconst [1]) y) x)
+ // result: (BTSL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
// result: (BTSLconst [int8(log32(c))] x)
@@ -16196,6 +16365,206 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
break
}
+ // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
@@ -16404,6 +16773,214 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
break
}
+ // match: (ORL (SHLXL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLXL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDQconst y [15])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDLconst y [15])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
@@ -16612,6 +17189,214 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
}
break
}
+ // match: (ORL (SHLXL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLXL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORL x x)
// result: x
for {
@@ -17505,6 +18290,25 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
break
}
+ // match: (ORQ (SHLXQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
// result: (BTSQconst [int8(log64(c))] x)
@@ -17785,6 +18589,206 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
break
}
+ // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
// result: (SHRDQ lo hi bits)
for {
@@ -17833,6 +18837,54 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
break
}
+ // match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
+ // result: (SHRDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHRDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
+ // result: (SHLDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHLDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
// match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
// result: (MOVQconst [c|d])
for {
@@ -19844,6 +20896,19 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SARL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SARXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SARL x (MOVQconst [c]))
// result: (SARLconst [int8(c&31)] x)
for {
@@ -20066,6 +21131,19 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SARQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SARXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SARQ x (MOVQconst [c]))
// result: (SARQconst [int8(c&63)] x)
for {
@@ -20341,6 +21419,518 @@ func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64SARXL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARXL x (MOVQconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARXL x (MOVLconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARXL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXL x (NEGQ (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARXL x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXL x (NEGQ (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARXL x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXL x (NEGL (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARXL x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXL x (NEGL (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARXL x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SARXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SARXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARXQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARXQ x (MOVQconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARXQ x (MOVLconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARXQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXQ x (NEGQ (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARXQ x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXQ x (NEGQ (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARXQ x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXQ x (NEGL (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARXQ x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARXQ x (NEGL (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARXQ x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARXQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SARXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SARXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
v_0 := v.Args[0]
// match: (SBBLcarrymask (FlagEQ))
@@ -21596,6 +23186,60 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
}
break
}
+ // match: (SETEQ (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (SETAE (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (SETAE (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
// match: (SETEQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETAE (BTLconst [int8(log32(c))] x))
@@ -22021,6 +23665,72 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
}
break
}
+ // match: (SETEQstore [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
@@ -23512,6 +25222,60 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
}
break
}
+ // match: (SETNE (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (SETB (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (SETB (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
// match: (SETNE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETB (BTLconst [int8(log32(c))] x))
@@ -23937,6 +25701,72 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
}
break
}
+ // match: (SETNEstore [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
@@ -24451,6 +26281,19 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHLL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHLL x (MOVQconst [c]))
// result: (SHLLconst [int8(c&31)] x)
for {
@@ -24641,28 +26484,6 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- // match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHLXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHLXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
@@ -24707,6 +26528,19 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHLQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHLQ x (MOVQconst [c]))
// result: (SHLQconst [int8(c&63)] x)
for {
@@ -24897,28 +26731,6 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- // match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHLXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHLXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
@@ -24971,6 +26783,518 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64SHLXL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLXL x (MOVQconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXL x (MOVLconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGQ (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLXL x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGQ (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLXL x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGL (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLXL x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGL (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLXL x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLXQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLXQ x (MOVQconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXQ x (MOVLconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGQ (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGQ (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGL (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGL (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -25058,6 +27382,19 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHRL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHRL x (MOVQconst [c]))
// result: (SHRLconst [int8(c&31)] x)
for {
@@ -25248,28 +27585,6 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- // match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHRXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHRXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
@@ -25302,6 +27617,19 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHRQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHRQ x (MOVQconst [c]))
// result: (SHRQconst [int8(c&63)] x)
for {
@@ -25492,28 +27820,6 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- // match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHRXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHRXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
@@ -25625,6 +27931,518 @@ func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64SHRXL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRXL x (MOVQconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXL x (MOVLconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGQ (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRXL x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGQ (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRXL x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGL (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRXL x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGL (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRXL x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRXQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRXQ x (MOVQconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXQ x (MOVLconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGQ (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGQ (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x (NEGQ y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGL (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGL (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x (NEGL y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -26908,6 +29726,25 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
}
break
}
+ // match: (XORL (SHLXL (MOVLconst [1]) y) x)
+ // result: (BTCL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (XORL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
// result: (BTCLconst [int8(log32(c))] x)
@@ -27445,6 +30282,25 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
}
break
}
+ // match: (XORQ (SHLXQ (MOVQconst [1]) y) x)
+ // result: (BTCQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (XORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
// result: (BTCQconst [int8(log64(c))] x)
@@ -34017,6 +36873,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
return false
}
func rewriteBlockAMD64(b *Block) bool {
+ typ := &b.Func.Config.Types
switch b.Kind {
case BlockAMD64EQ:
// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
@@ -34067,6 +36924,54 @@ func rewriteBlockAMD64(b *Block) bool {
}
break
}
+ // match: (EQ (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (UGE (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (UGE (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
// match: (EQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (UGE (BTLconst [int8(log32(c))] x))
@@ -34551,6 +37456,19 @@ func rewriteBlockAMD64(b *Block) bool {
b.resetWithControl(BlockAMD64NE, v0)
return true
}
+ case BlockJumpTable:
+ // match: (JumpTable idx)
+ // result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ {makeJumpTableSym(b)} (SB)))
+ for {
+ idx := b.Controls[0]
+ v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
+ v0.Aux = symToAux(makeJumpTableSym(b))
+ v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
+ b.Aux = symToAux(makeJumpTableSym(b))
+ return true
+ }
case BlockAMD64LE:
// match: (LE (InvertFlags cmp) yes no)
// result: (GE cmp yes no)
@@ -34870,6 +37788,54 @@ func rewriteBlockAMD64(b *Block) bool {
}
break
}
+ // match: (NE (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (ULT (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (ULT (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
// match: (NE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (ULT (BTLconst [int8(log32(c))] x))
diff --git a/src/cmd/compile/internal/ssa/rewriteCond_test.go b/src/cmd/compile/internal/ssa/rewriteCond_test.go
index 2c26fdf142..ca74ed5947 100644
--- a/src/cmd/compile/internal/ssa/rewriteCond_test.go
+++ b/src/cmd/compile/internal/ssa/rewriteCond_test.go
@@ -68,8 +68,10 @@ func TestCondRewrite(t *testing.T) {
}
// Profile the aforementioned optimization from two angles:
-// SoloJump: generated branching code has one 'jump', for '<' and '>='
-// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
+//
+// SoloJump: generated branching code has one 'jump', for '<' and '>='
+// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
+//
// We expect that 'CombJump' is generally on par with the non-optimized code, and
// 'SoloJump' demonstrates some improvement.
// It's for arm64 initially, please see https://github.com/golang/go/issues/38740
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index fbf227562a..f61b6ca3ec 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -519,6 +519,38 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool {
}
break
}
+ // match: (Add16 x (Neg16 y))
+ // result: (Sub16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Add16 (Const16 [1]) (Com16 x))
// result: (Neg16 x)
for {
@@ -764,6 +796,38 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool {
}
break
}
+ // match: (Add32 x (Neg32 y))
+ // result: (Sub32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Com32 x) x)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Add32 (Const32 [1]) (Com32 x))
// result: (Neg32 x)
for {
@@ -1036,6 +1100,38 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool {
}
break
}
+ // match: (Add64 x (Neg64 y))
+ // result: (Sub64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Com64 x) x)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Add64 (Const64 [1]) (Com64 x))
// result: (Neg64 x)
for {
@@ -1308,6 +1404,38 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool {
}
break
}
+ // match: (Add8 x (Neg8 y))
+ // result: (Sub8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Com8 x) x)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Add8 (Const8 [1]) (Com8 x))
// result: (Neg8 x)
for {
@@ -1630,6 +1758,23 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool {
}
break
}
+ // match: (And16 (Com16 x) x)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
// match: (And16 x (And16 x y))
// result: (And16 x y)
for {
@@ -1828,6 +1973,23 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool {
}
break
}
+ // match: (And32 (Com32 x) x)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
// match: (And32 x (And32 x y))
// result: (And32 x y)
for {
@@ -2026,6 +2188,23 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool {
}
break
}
+ // match: (And64 (Com64 x) x)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
// match: (And64 x (And64 x y))
// result: (And64 x y)
for {
@@ -2224,6 +2403,23 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool {
}
break
}
+ // match: (And8 (Com8 x) x)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
// match: (And8 x (And8 x y))
// result: (And8 x y)
for {
@@ -16964,6 +17160,23 @@ func rewriteValuegeneric_OpOr16(v *Value) bool {
}
break
}
+ // match: (Or16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Or16 x (Or16 x y))
// result: (Or16 x y)
for {
@@ -17142,6 +17355,23 @@ func rewriteValuegeneric_OpOr32(v *Value) bool {
}
break
}
+ // match: (Or32 (Com32 x) x)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Or32 x (Or32 x y))
// result: (Or32 x y)
for {
@@ -17320,6 +17550,23 @@ func rewriteValuegeneric_OpOr64(v *Value) bool {
}
break
}
+ // match: (Or64 (Com64 x) x)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Or64 x (Or64 x y))
// result: (Or64 x y)
for {
@@ -17498,6 +17745,23 @@ func rewriteValuegeneric_OpOr8(v *Value) bool {
}
break
}
+ // match: (Or8 (Com8 x) x)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
// match: (Or8 x (Or8 x y))
// result: (Or8 x y)
for {
@@ -22994,6 +23258,34 @@ func rewriteValuegeneric_OpSub16(v *Value) bool {
v.AuxInt = int16ToAuxInt(0)
return true
}
+ // match: (Sub16 (Neg16 x) (Com16 x))
+ // result: (Const16 [1])
+ for {
+ if v_0.Op != OpNeg16 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom16 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(1)
+ return true
+ }
+ // match: (Sub16 (Com16 x) (Neg16 x))
+ // result: (Const16 [-1])
+ for {
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg16 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
// match: (Sub16 (Add16 x y) x)
// result: y
for {
@@ -23309,6 +23601,34 @@ func rewriteValuegeneric_OpSub32(v *Value) bool {
v.AuxInt = int32ToAuxInt(0)
return true
}
+ // match: (Sub32 (Neg32 x) (Com32 x))
+ // result: (Const32 [1])
+ for {
+ if v_0.Op != OpNeg32 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom32 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (Sub32 (Com32 x) (Neg32 x))
+ // result: (Const32 [-1])
+ for {
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg32 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
// match: (Sub32 (Add32 x y) x)
// result: y
for {
@@ -23648,6 +23968,34 @@ func rewriteValuegeneric_OpSub64(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
+ // match: (Sub64 (Neg64 x) (Com64 x))
+ // result: (Const64 [1])
+ for {
+ if v_0.Op != OpNeg64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom64 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (Sub64 (Com64 x) (Neg64 x))
+ // result: (Const64 [-1])
+ for {
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg64 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
// match: (Sub64 (Add64 x y) x)
// result: y
for {
@@ -23987,6 +24335,34 @@ func rewriteValuegeneric_OpSub8(v *Value) bool {
v.AuxInt = int8ToAuxInt(0)
return true
}
+ // match: (Sub8 (Neg8 x) (Com8 x))
+ // result: (Const8 [1])
+ for {
+ if v_0.Op != OpNeg8 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom8 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(1)
+ return true
+ }
+ // match: (Sub8 (Com8 x) (Neg8 x))
+ // result: (Const8 [-1])
+ for {
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg8 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
// match: (Sub8 (Add8 x y) x)
// result: y
for {
@@ -24714,6 +25090,37 @@ func rewriteValuegeneric_OpXor16(v *Value) bool {
}
break
}
+ // match: (Xor16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 (Const16 [-1]) x)
+ // result: (Com16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (Xor16 x (Xor16 x y))
// result: y
for {
@@ -24845,6 +25252,37 @@ func rewriteValuegeneric_OpXor32(v *Value) bool {
}
break
}
+ // match: (Xor32 (Com32 x) x)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 (Const32 [-1]) x)
+ // result: (Com32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (Xor32 x (Xor32 x y))
// result: y
for {
@@ -24976,6 +25414,37 @@ func rewriteValuegeneric_OpXor64(v *Value) bool {
}
break
}
+ // match: (Xor64 (Com64 x) x)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 (Const64 [-1]) x)
+ // result: (Com64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (Xor64 x (Xor64 x y))
// result: y
for {
@@ -25107,6 +25576,37 @@ func rewriteValuegeneric_OpXor8(v *Value) bool {
}
break
}
+ // match: (Xor8 (Com8 x) x)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 (Const8 [-1]) x)
+ // result: (Com8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (Xor8 x (Xor8 x y))
// result: y
for {
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index c5130b2ee5..170d8b7095 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -338,13 +338,15 @@ func schedule(f *Func) {
// if v transitively depends on store s, v is ordered after s,
// otherwise v is ordered before s.
// Specifically, values are ordered like
-// store1
-// NilCheck that depends on store1
-// other values that depends on store1
-// store2
-// NilCheck that depends on store2
-// other values that depends on store2
-// ...
+//
+// store1
+// NilCheck that depends on store1
+//	other values that depend on store1
+// store2
+// NilCheck that depends on store2
+//	other values that depend on store2
+// ...
+//
// The order of non-store and non-NilCheck values are undefined
// (not necessarily dependency order). This should be cheaper
// than a full scheduling as done above.
diff --git a/src/cmd/compile/internal/ssa/shift_test.go b/src/cmd/compile/internal/ssa/shift_test.go
index 3876d8df12..06c2f6720f 100644
--- a/src/cmd/compile/internal/ssa/shift_test.go
+++ b/src/cmd/compile/internal/ssa/shift_test.go
@@ -85,7 +85,7 @@ func TestShiftToExtensionAMD64(t *testing.T) {
// makeShiftExtensionFunc generates a function containing:
//
-// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
+// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
//
// This may be equivalent to a sign or zero extension.
func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go
index c0b9eacf41..5f1f892120 100644
--- a/src/cmd/compile/internal/ssa/shortcircuit.go
+++ b/src/cmd/compile/internal/ssa/shortcircuit.go
@@ -67,11 +67,11 @@ func shortcircuit(f *Func) {
//
// (1) Look for a CFG of the form
//
-// p other pred(s)
-// \ /
-// b
-// / \
-// t other succ
+// p other pred(s)
+// \ /
+// b
+// / \
+// t other succ
//
// in which b is an If block containing a single phi value with a single use (b's Control),
// which has a ConstBool arg.
@@ -80,21 +80,21 @@ func shortcircuit(f *Func) {
//
// Rewrite this into
//
-// p other pred(s)
-// | /
-// | b
-// |/ \
-// t u
+// p other pred(s)
+// | /
+// | b
+// |/ \
+// t u
//
// and remove the appropriate phi arg(s).
//
// (2) Look for a CFG of the form
//
-// p q
-// \ /
-// b
-// / \
-// t u
+// p q
+// \ /
+// b
+// / \
+// t u
//
// in which b is as described in (1).
// However, b may also contain other phi values.
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
index 732bb8e321..9f4e0007d3 100644
--- a/src/cmd/compile/internal/ssa/sparsetree.go
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -210,6 +210,7 @@ func (t SparseTree) isAncestor(x, y *Block) bool {
// 1. If domorder(x) > domorder(y) then x does not dominate y.
// 2. If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
// then x does not dominate z.
+//
// Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
// Property (2) allows searches for dominated blocks to exit early.
func (t SparseTree) domorder(x *Block) int32 {
diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go
index c930a205c1..1fd7b33d5f 100644
--- a/src/cmd/compile/internal/ssa/trim.go
+++ b/src/cmd/compile/internal/ssa/trim.go
@@ -130,11 +130,11 @@ func emptyBlock(b *Block) bool {
// trimmableBlock reports whether the block can be trimmed from the CFG,
// subject to the following criteria:
-// - it should not be the first block
-// - it should be BlockPlain
-// - it should not loop back to itself
-// - it either is the single predecessor of the successor block or
-// contains no actual instructions
+// - it should not be the first block
+// - it should be BlockPlain
+// - it should not loop back to itself
+// - it either is the single predecessor of the successor block or
+// contains no actual instructions
func trimmableBlock(b *Block) bool {
if b.Kind != BlockPlain || b == b.Func.Entry {
return false
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index 7b411a4612..8f125cef99 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -228,6 +228,7 @@ func (v *Value) auxString() string {
// If/when midstack inlining is enabled (-l=4), the compiler gets both larger and slower.
// Not-inlining this method is a help (*Value.reset and *Block.NewValue0 are similar).
+//
//go:noinline
func (v *Value) AddArg(w *Value) {
if v.Args == nil {
@@ -331,6 +332,7 @@ func (v *Value) resetArgs() {
// reset is called from most rewrite rules.
// Allowing it to be inlined increases the size
// of cmd/compile by almost 10%, and slows it down.
+//
//go:noinline
func (v *Value) reset(op Op) {
if v.InCache {
@@ -377,6 +379,7 @@ func (v *Value) invalidateRecursively() bool {
// copyOf is called from rewrite rules.
// It modifies v to be (Copy a).
+//
//go:noinline
func (v *Value) copyOf(a *Value) {
if v == a {
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index 21eee12c85..65ff960c84 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -486,7 +486,7 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va
inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3
// put arguments on stack
- off := config.ctxt.FixedFrameSize()
+ off := config.ctxt.Arch.FixedFrameSize
var argTypes []*types.Type
if typ != nil { // for typedmemmove
@@ -529,7 +529,7 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va
// issue call
call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil)))
call.AddArgs(wbargs...)
- call.AuxInt = off - config.ctxt.FixedFrameSize()
+ call.AuxInt = off - config.ctxt.Arch.FixedFrameSize
return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
}
@@ -629,7 +629,7 @@ func IsNewObject(v *Value) (mem *Value, ok bool) {
if v.Args[0].Args[0].Op != OpSP {
return nil, false
}
- if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value
+ if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value
return nil, false
}
return mem, true
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index 86d40e239d..825b32aa80 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -225,13 +225,13 @@ func StackOffset(slot ssa.LocalSlot) int32 {
switch n.Class {
case ir.PPARAM, ir.PPARAMOUT:
if !n.IsOutputParamInRegisters() {
- off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+ off = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize
break
}
fallthrough // PPARAMOUT in registers allocates like an AUTO
case ir.PAUTO:
off = n.FrameOffset()
- if base.Ctxt.FixedFrameSize() == 0 {
+ if base.Ctxt.Arch.FixedFrameSize == 0 {
off -= int64(types.PtrSize)
}
if buildcfg.FramePointerEnabled {
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 883772b341..adb95445c4 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -286,10 +286,10 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
-// - Offset of the deferBits variable
-// - Number of defers in the function
-// - Information about each defer call, in reverse order of appearance in the function:
-// - Offset of the closure value to call
+// - Offset of the deferBits variable
+// - Number of defers in the function
+// - Information about each defer call, in reverse order of appearance in the function:
+// - Offset of the closure value to call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
x.Set(obj.AttrContentAddressable, true)
@@ -1861,6 +1861,84 @@ func (s *state) stmt(n ir.Node) {
}
s.startBlock(bEnd)
+ case ir.OJUMPTABLE:
+ n := n.(*ir.JumpTableStmt)
+
+ // Make blocks we'll need.
+ jt := s.f.NewBlock(ssa.BlockJumpTable)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ // The only thing that needs evaluating is the index we're looking up.
+ idx := s.expr(n.Idx)
+ unsigned := idx.Type.IsUnsigned()
+
+ // Extend so we can do everything in uintptr arithmetic.
+ t := types.Types[types.TUINTPTR]
+ idx = s.conv(nil, idx, idx.Type, t)
+
+ // The ending condition for the current block decides whether we'll use
+ // the jump table at all.
+ // We check that min <= idx <= max and jump around the jump table
+ // if that test fails.
+ // We implement min <= idx <= max with 0 <= idx-min <= max-min, because
+ // we'll need idx-min anyway as the control value for the jump table.
+ var min, max uint64
+ if unsigned {
+ min, _ = constant.Uint64Val(n.Cases[0])
+ max, _ = constant.Uint64Val(n.Cases[len(n.Cases)-1])
+ } else {
+ mn, _ := constant.Int64Val(n.Cases[0])
+ mx, _ := constant.Int64Val(n.Cases[len(n.Cases)-1])
+ min = uint64(mn)
+ max = uint64(mx)
+ }
+ // Compare idx-min with max-min, to see if we can use the jump table.
+ idx = s.newValue2(s.ssaOp(ir.OSUB, t), t, idx, s.uintptrConstant(min))
+ width := s.uintptrConstant(max - min)
+ cmp := s.newValue2(s.ssaOp(ir.OLE, t), types.Types[types.TBOOL], idx, width)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.AddEdgeTo(jt) // in range - use jump table
+ b.AddEdgeTo(bEnd) // out of range - no case in the jump table will trigger
+ b.Likely = ssa.BranchLikely // TODO: assumes missing the table entirely is unlikely. True?
+
+ // Build jump table block.
+ s.startBlock(jt)
+ jt.Pos = n.Pos()
+ if base.Flag.Cfg.SpectreIndex {
+ idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, width)
+ }
+ jt.SetControl(idx)
+
+ // Figure out where we should go for each index in the table.
+ table := make([]*ssa.Block, max-min+1)
+ for i := range table {
+ table[i] = bEnd // default target
+ }
+ for i := range n.Targets {
+ c := n.Cases[i]
+ lab := s.label(n.Targets[i])
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+ var val uint64
+ if unsigned {
+ val, _ = constant.Uint64Val(c)
+ } else {
+ vl, _ := constant.Int64Val(c)
+ val = uint64(vl)
+ }
+ // Overwrite the default target.
+ table[val-min] = lab.target
+ }
+ for _, t := range table {
+ jt.AddEdgeTo(t)
+ }
+ s.endBlock()
+
+ s.startBlock(bEnd)
+
case ir.OVARDEF:
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
@@ -2351,6 +2429,13 @@ func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
return x
}
+func (s *state) uintptrConstant(v uint64) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue0I(ssa.OpConst32, types.Types[types.TUINTPTR], int64(v))
+ }
+ return s.newValue0I(ssa.OpConst64, types.Types[types.TUINTPTR], int64(v))
+}
+
func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
@@ -5017,7 +5102,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
- argStart := base.Ctxt.FixedFrameSize()
+ argStart := base.Ctxt.Arch.FixedFrameSize
// Defer/go args.
if k != callNormal && k != callTail {
// Write closure (arg to newproc/deferproc).
@@ -5521,7 +5606,7 @@ func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
- off := base.Ctxt.FixedFrameSize()
+ off := base.Ctxt.Arch.FixedFrameSize
var callArgs []*ssa.Value
var callArgTypes []*types.Type
@@ -5535,13 +5620,6 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
}
off = types.Rnd(off, int64(types.RegSize))
- // Accumulate results types and offsets
- offR := off
- for _, t := range results {
- offR = types.Rnd(offR, t.Alignment())
- offR += t.Size()
- }
-
// Issue call
var call *ssa.Value
aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
@@ -5555,7 +5633,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
- call.AuxInt = off - base.Ctxt.FixedFrameSize()
+ call.AuxInt = off - base.Ctxt.Arch.FixedFrameSize
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
@@ -6447,6 +6525,9 @@ type State struct {
// and where they would like to go.
Branches []Branch
+ // JumpTables remembers all the jump tables we've seen.
+ JumpTables []*ssa.Block
+
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
@@ -7059,6 +7140,20 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
}
+ // Resolve jump table destinations.
+ for _, jt := range s.JumpTables {
+ // Convert from *Block targets to *Prog targets.
+ targets := make([]*obj.Prog, len(jt.Succs))
+ for i, e := range jt.Succs {
+ targets[i] = s.bstart[e.Block().ID]
+ }
+ // Add to list of jump tables to be resolved at assembly time.
+ // The assembler converts from *Prog entries to absolute addresses
+ // once it knows instruction byte offsets.
+ fi := pp.CurFunc.LSym.Func()
+ fi.JumpTables = append(fi.JumpTables, obj.JumpTable{Sym: jt.Aux.(*obj.LSym), Targets: targets})
+ }
+
if e.log { // spew to stdout
filename := ""
for p := pp.Text; p != nil; p = p.Link {
@@ -7712,6 +7807,10 @@ func (e *ssafn) MyImportPath() string {
return base.Ctxt.Pkgpath
}
+func (e *ssafn) LSym() string {
+ return e.curfn.LSym.Name
+}
+
func clobberBase(n ir.Node) ir.Node {
if n.Op() == ir.ODOT {
n := n.(*ir.SelectorExpr)
diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go
index 56e97c71d8..6079097426 100644
--- a/src/cmd/compile/internal/syntax/branches.go
+++ b/src/cmd/compile/internal/syntax/branches.go
@@ -11,10 +11,10 @@ import "fmt"
// checkBranches checks correct use of labels and branch
// statements (break, continue, goto) in a function body.
// It catches:
-// - misplaced breaks and continues
-// - bad labeled breaks and continues
-// - invalid, unused, duplicate, and missing labels
-// - gotos jumping over variable declarations and into blocks
+// - misplaced breaks and continues
+// - bad labeled breaks and continues
+// - invalid, unused, duplicate, and missing labels
+// - gotos jumping over variable declarations and into blocks
func checkBranches(body *BlockStmt, errh ErrorHandler) {
if body == nil {
return
diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go
index 884a983bdd..c736f970f9 100644
--- a/src/cmd/compile/internal/test/float_test.go
+++ b/src/cmd/compile/internal/test/float_test.go
@@ -170,6 +170,7 @@ func cvt8(a float32) int32 {
}
// make sure to cover int, uint cases (issue #16738)
+//
//go:noinline
func cvt9(a float64) int {
return int(a)
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index b10d37a17c..211068e1dc 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -136,6 +136,7 @@ func TestIntendedInlining(t *testing.T) {
"Value.CanSet",
"Value.CanInterface",
"Value.IsValid",
+ "Value.MapRange",
"Value.pointer",
"add",
"align",
diff --git a/src/cmd/compile/internal/test/shift_test.go b/src/cmd/compile/internal/test/shift_test.go
index ea88f0a70a..58c8dde1a0 100644
--- a/src/cmd/compile/internal/test/shift_test.go
+++ b/src/cmd/compile/internal/test/shift_test.go
@@ -1029,3 +1029,13 @@ func TestShiftGeneric(t *testing.T) {
}
}
}
+
+var shiftSink64 int64
+
+func BenchmarkShiftArithmeticRight(b *testing.B) {
+ x := shiftSink64
+ for i := 0; i < b.N; i++ {
+ x = x >> (i & 63)
+ }
+ shiftSink64 = x
+}
diff --git a/src/cmd/compile/internal/test/switch_test.go b/src/cmd/compile/internal/test/switch_test.go
new file mode 100644
index 0000000000..30dee6257e
--- /dev/null
+++ b/src/cmd/compile/internal/test/switch_test.go
@@ -0,0 +1,137 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "math/bits"
+ "testing"
+)
+
+func BenchmarkSwitch8Predictable(b *testing.B) {
+ benchmarkSwitch8(b, true)
+}
+func BenchmarkSwitch8Unpredictable(b *testing.B) {
+ benchmarkSwitch8(b, false)
+}
+func benchmarkSwitch8(b *testing.B, predictable bool) {
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch rng.value() & 7 {
+ case 0:
+ n += 1
+ case 1:
+ n += 2
+ case 2:
+ n += 3
+ case 3:
+ n += 4
+ case 4:
+ n += 5
+ case 5:
+ n += 6
+ case 6:
+ n += 7
+ case 7:
+ n += 8
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitch32Predictable(b *testing.B) {
+ benchmarkSwitch32(b, true)
+}
+func BenchmarkSwitch32Unpredictable(b *testing.B) {
+ benchmarkSwitch32(b, false)
+}
+func benchmarkSwitch32(b *testing.B, predictable bool) {
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch rng.value() & 31 {
+ case 0, 1, 2:
+ n += 1
+ case 4, 5, 6:
+ n += 2
+ case 8, 9, 10:
+ n += 3
+ case 12, 13, 14:
+ n += 4
+ case 16, 17, 18:
+ n += 5
+ case 20, 21, 22:
+ n += 6
+ case 24, 25, 26:
+ n += 7
+ case 28, 29, 30:
+ n += 8
+ default:
+ n += 9
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitchStringPredictable(b *testing.B) {
+ benchmarkSwitchString(b, true)
+}
+func BenchmarkSwitchStringUnpredictable(b *testing.B) {
+ benchmarkSwitchString(b, false)
+}
+func benchmarkSwitchString(b *testing.B, predictable bool) {
+ a := []string{
+ "foo",
+ "foo1",
+ "foo22",
+ "foo333",
+ "foo4444",
+ "foo55555",
+ "foo666666",
+ "foo7777777",
+ }
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch a[rng.value()&7] {
+ case "foo":
+ n += 1
+ case "foo1":
+ n += 2
+ case "foo22":
+ n += 3
+ case "foo333":
+ n += 4
+ case "foo4444":
+ n += 5
+ case "foo55555":
+ n += 6
+ case "foo666666":
+ n += 7
+ case "foo7777777":
+ n += 8
+ }
+ }
+ sink = n
+}
+
+// A simple random number generator used to make switches conditionally predictable.
+type rng uint64
+
+func newRNG() rng {
+ return 1
+}
+func (r rng) next(predictable bool) rng {
+ if predictable {
+ return r + 1
+ }
+ return rng(bits.RotateLeft64(uint64(r), 13) * 0x3c374d)
+}
+func (r rng) value() uint64 {
+ return uint64(r)
+}
diff --git a/src/cmd/compile/internal/test/testdata/addressed_test.go b/src/cmd/compile/internal/test/testdata/addressed_test.go
index cdabf978f0..4cc9ac4d5b 100644
--- a/src/cmd/compile/internal/test/testdata/addressed_test.go
+++ b/src/cmd/compile/internal/test/testdata/addressed_test.go
@@ -145,6 +145,7 @@ func (v V) val() int64 {
// and y.val() should be equal to which and y.p.val() should
// be equal to z.val(). Also, x(.p)**8 == x; that is, the
// autos are all linked into a ring.
+//
//go:noinline
func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
fill_ssa(v.w, v.x, &v, v.p) // gratuitous no-op to force addressing
@@ -191,6 +192,7 @@ func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
// gets is an address-mentioning way of implementing
// structure assignment.
+//
//go:noinline
func (to *V) gets(from *V) {
*to = *from
@@ -198,12 +200,14 @@ func (to *V) gets(from *V) {
// gets is an address-and-interface-mentioning way of
// implementing structure assignment.
+//
//go:noinline
func (to *V) getsI(from interface{}) {
*to = *from.(*V)
}
// fill_ssa initializes r with V{w:w, x:x, p:p}
+//
//go:noinline
func fill_ssa(w, x int64, r, p *V) {
*r = V{w: w, x: x, p: p}
diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go
index 7d54a9181d..253142a0fb 100644
--- a/src/cmd/compile/internal/test/testdata/arith_test.go
+++ b/src/cmd/compile/internal/test/testdata/arith_test.go
@@ -225,6 +225,7 @@ func testArithConstShift(t *testing.T) {
// overflowConstShift_ssa verifes that constant folding for shift
// doesn't wrap (i.e. x << MAX_INT << 1 doesn't get folded to x << 0).
+//
//go:noinline
func overflowConstShift64_ssa(x int64) int64 {
return x << uint64(0xffffffffffffffff) << uint64(1)
diff --git a/src/cmd/compile/internal/test/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go
index 16d571ce2c..ff3a1609c5 100644
--- a/src/cmd/compile/internal/test/testdata/ctl_test.go
+++ b/src/cmd/compile/internal/test/testdata/ctl_test.go
@@ -117,6 +117,7 @@ type junk struct {
// flagOverwrite_ssa is intended to reproduce an issue seen where a XOR
// was scheduled between a compare and branch, clearing flags.
+//
//go:noinline
func flagOverwrite_ssa(s *junk, c int) int {
if '0' <= c && c <= '9' {
diff --git a/src/cmd/compile/internal/test/testdata/fp_test.go b/src/cmd/compile/internal/test/testdata/fp_test.go
index 7d61a8063e..b96ce84a6c 100644
--- a/src/cmd/compile/internal/test/testdata/fp_test.go
+++ b/src/cmd/compile/internal/test/testdata/fp_test.go
@@ -14,6 +14,7 @@ import (
// manysub_ssa is designed to tickle bugs that depend on register
// pressure or unfriendly operand ordering in registers (and at
// least once it succeeded in this).
+//
//go:noinline
func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd float64) {
aa = a + 11.0 - a
@@ -37,6 +38,7 @@ func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc
// fpspill_ssa attempts to trigger a bug where phis with floating point values
// were stored in non-fp registers causing an error in doasm.
+//
//go:noinline
func fpspill_ssa(a int) float64 {
diff --git a/src/cmd/compile/internal/test/testdata/loadstore_test.go b/src/cmd/compile/internal/test/testdata/loadstore_test.go
index 57571f5d17..052172819a 100644
--- a/src/cmd/compile/internal/test/testdata/loadstore_test.go
+++ b/src/cmd/compile/internal/test/testdata/loadstore_test.go
@@ -73,6 +73,7 @@ var b int
// testDeadStorePanic_ssa ensures that we don't optimize away stores
// that could be read by after recover(). Modeled after fixedbugs/issue1304.
+//
//go:noinline
func testDeadStorePanic_ssa(a int) (r int) {
defer func() {
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index 67597cebb4..581928c005 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -212,6 +212,7 @@ var runtimeDecls = [...]struct {
}
// Not inlining this function removes a significant chunk of init code.
+//
//go:noinline
func newSig(params, results []*types.Field) *types.Type {
return types.NewSignature(types.NoPkg, nil, nil, params, results)
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index 1422ab0031..a626c000be 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -620,7 +620,8 @@ func OrigInt(n ir.Node, v int64) ir.Node {
// get the same type going out.
// force means must assign concrete (non-ideal) type.
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
-// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
+//
+// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
if l.Type() == nil || r.Type() == nil {
return l, r
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index e6adc05a65..f0b7b74aed 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -76,8 +76,9 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
// tcArith typechecks operands of a binary arithmetic expression.
// The result of tcArith MUST be assigned back to original operands,
// t is the type of the expression, and should be set by the caller. e.g:
-// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
-// n.SetType(t)
+//
+// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
+// n.SetType(t)
func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
l, r = defaultlit2(l, r, false)
if l.Type() == nil || r.Type() == nil {
@@ -194,7 +195,8 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
}
// The result of tcCompLit MUST be assigned back to n, e.g.
-// n.Left = tcCompLit(n.Left)
+//
+// n.Left = tcCompLit(n.Left)
func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
if base.EnableTrace && base.Flag.LowerT {
defer tracePrint("tcCompLit", n)(&res)
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 5d319eaca3..12159b71e1 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -258,7 +258,7 @@ import (
// 1: added column details to Pos
// 2: added information for generic function/types. The export of non-generic
// functions/types remains largely backward-compatible. Breaking changes include:
-// - a 'kind' byte is added to constant values
+// - a 'kind' byte is added to constant values
const (
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
diff --git a/src/cmd/compile/internal/typecheck/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go
index 6dbd1869b3..9b27557956 100644
--- a/src/cmd/compile/internal/typecheck/mkbuiltin.go
+++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go
@@ -105,6 +105,7 @@ func mkbuiltin(w io.Writer, name string) {
fmt.Fprintln(w, `
// Not inlining this function removes a significant chunk of init code.
+//
//go:noinline
func newSig(params, results []*types.Field) *types.Type {
return types.NewSignature(types.NoPkg, nil, nil, params, results)
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
index 6c2e84680b..1f60f31851 100644
--- a/src/cmd/compile/internal/typecheck/syms.go
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -24,7 +24,8 @@ func LookupRuntime(name string) *ir.Name {
// successive occurrences of the "any" placeholder in the
// type syntax expression n.Type.
// The result of SubstArgTypes MUST be assigned back to old, e.g.
-// n.Left = SubstArgTypes(n.Left, t1, t2)
+//
+// n.Left = SubstArgTypes(n.Left, t1, t2)
func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
for _, t := range types_ {
types.CalcSize(t)
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 85de653a82..2eb9e6d718 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -240,7 +240,8 @@ func typecheckNtype(n ir.Ntype) ir.Ntype {
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
-// n.Left = typecheck(n.Left, top)
+//
+// n.Left = typecheck(n.Left, top)
func typecheck(n ir.Node, top int) (res ir.Node) {
// cannot type check until all the source has been parsed
if !TypecheckAllowed {
@@ -414,7 +415,8 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
// but also accepts untyped numeric values representable as
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
-// n.Left = indexlit(n.Left)
+//
+// n.Left = indexlit(n.Left)
func indexlit(n ir.Node) ir.Node {
if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
return DefaultLit(n, types.Types[types.TINT])
@@ -961,7 +963,8 @@ func checksliceconst(lo ir.Node, hi ir.Node) bool {
}
// The result of implicitstar MUST be assigned back to n, e.g.
-// n.Left = implicitstar(n.Left)
+//
+// n.Left = implicitstar(n.Left)
func implicitstar(n ir.Node) ir.Node {
// insert implicit * if needed for fixed array
t := n.Type()
@@ -1607,7 +1610,8 @@ func checkassignto(src *types.Type, dst ir.Node) {
}
// The result of stringtoruneslit MUST be assigned back to n, e.g.
-// n.Left = stringtoruneslit(n.Left)
+//
+// n.Left = stringtoruneslit(n.Left)
func stringtoruneslit(n *ir.ConvExpr) ir.Node {
if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
base.Fatalf("stringtoarraylit %v", n)
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 147194c369..987352babc 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -492,9 +492,9 @@ type Slice struct {
// A Field is a (Sym, Type) pairing along with some other information, and,
// depending on the context, is used to represent:
-// - a field in a struct
-// - a method in an interface or associated with a named type
-// - a function parameter
+// - a field in a struct
+// - a method in an interface or associated with a named type
+// - a function parameter
type Field struct {
flags bitset8
@@ -1121,9 +1121,10 @@ func (t *Type) SimpleString() string {
}
// Cmp is a comparison between values a and b.
-// -1 if a < b
-// 0 if a == b
-// 1 if a > b
+//
+// -1 if a < b
+// 0 if a == b
+// 1 if a > b
type Cmp int8
const (
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 34bb29cadc..54cddaee28 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -21,7 +21,6 @@
// Type inference computes the type (Type) of every expression (syntax.Expr)
// and checks for compliance with the language specification.
// Use Info.Types[expr].Type for the results of type inference.
-//
package types2
import (
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 528beaacea..fde7291b03 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -311,6 +311,18 @@ func TestTypesInfo(t *testing.T) {
`[][]struct{}`,
},
+ // issue 47243
+ {`package issue47243_a; var x int32; var _ = x << 3`, `3`, `untyped int`},
+ {`package issue47243_b; var x int32; var _ = x << 3.`, `3.`, `untyped float`},
+ {`package issue47243_c; var x int32; var _ = 1 << x`, `1 << x`, `int`},
+ {`package issue47243_d; var x int32; var _ = 1 << x`, `1`, `int`},
+ {`package issue47243_e; var x int32; var _ = 1 << 2`, `1`, `untyped int`},
+ {`package issue47243_f; var x int32; var _ = 1 << 2`, `2`, `untyped int`},
+ {`package issue47243_g; var x int32; var _ = int(1) << 2`, `2`, `untyped int`},
+ {`package issue47243_h; var x int32; var _ = 1 << (2 << x)`, `1`, `int`},
+ {`package issue47243_i; var x int32; var _ = 1 << (2 << x)`, `(2 << x)`, `untyped int`},
+ {`package issue47243_j; var x int32; var _ = 1 << (2 << x)`, `2`, `untyped int`},
+
// tests for broken code that doesn't parse or type-check
{brokenPkg + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`},
{brokenPkg + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`},
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index ec242c5e22..2e1ae0d2be 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -263,7 +263,7 @@ func testFiles(t *testing.T, filenames []string, colDelta uint, manual bool) {
// (and a separating "--"). For instance, to test the package made
// of the files foo.go and bar.go, use:
//
-// go test -run Manual -- foo.go bar.go
+// go test -run Manual -- foo.go bar.go
//
// If no source arguments are provided, the file testdata/manual.go
// is used instead.
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 1ecb4ff54b..e0c22f5b03 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -954,32 +954,48 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) {
// spec: "The right operand in a shift expression must have integer type
// or be an untyped constant representable by a value of type uint."
- // Provide a good error message for negative shift counts.
+ // Check that constants are representable by uint, but do not convert them
+ // (see also issue #47243).
if y.mode == constant_ {
+ // Provide a good error message for negative shift counts.
yval := constant.ToInt(y.val) // consider -1, 1.0, but not -1.1
if yval.Kind() == constant.Int && constant.Sign(yval) < 0 {
check.errorf(y, invalidOp+"negative shift count %s", y)
x.mode = invalid
return
}
- }
- // Caution: Check for isUntyped first because isInteger includes untyped
- // integers (was bug #43697).
- if isUntyped(y.typ) {
- check.convertUntyped(y, Typ[Uint])
- if y.mode == invalid {
+ if isUntyped(y.typ) {
+ // Caution: Check for representability here, rather than in the switch
+ // below, because isInteger includes untyped integers (was bug #43697).
+ check.representable(y, Typ[Uint])
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ }
+ } else {
+ // Check that RHS is otherwise at least of integer type.
+ switch {
+ case allInteger(y.typ):
+ if !allUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) {
+ check.errorf(y, invalidOp+"signed shift count %s requires go1.13 or later", y)
+ x.mode = invalid
+ return
+ }
+ case isUntyped(y.typ):
+ // This is incorrect, but preserves pre-existing behavior.
+ // See also bug #47410.
+ check.convertUntyped(y, Typ[Uint])
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ default:
+ check.errorf(y, invalidOp+"shift count %s must be integer", y)
x.mode = invalid
return
}
- } else if !allInteger(y.typ) {
- check.errorf(y, invalidOp+"shift count %s must be integer", y)
- x.mode = invalid
- return
- } else if !allUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) {
- check.versionErrorf(y, "go1.13", invalidOp+"signed shift count %s", y)
- x.mode = invalid
- return
}
if x.mode == constant_ {
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 9f7e593eeb..9e77d67a7d 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -23,10 +23,10 @@ const useConstraintTypeInference = true
//
// Inference proceeds as follows. Starting with given type arguments:
//
-// 1) apply FTI (function type inference) with typed arguments,
-// 2) apply CTI (constraint type inference),
-// 3) apply FTI with untyped function arguments,
-// 4) apply CTI.
+// 1. apply FTI (function type inference) with typed arguments,
+// 2. apply CTI (constraint type inference),
+// 3. apply FTI with untyped function arguments,
+// 4. apply CTI.
//
// The process stops as soon as all type arguments are known or an error occurs.
func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (result []Type) {
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index 93defd6618..684bbf7a8b 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -25,9 +25,9 @@ import (
// The last index entry is the field or method index in the (possibly embedded)
// type where the entry was found, either:
//
-// 1) the list of declared methods of a named type; or
-// 2) the list of all methods (method set) of an interface type; or
-// 3) the list of fields of a struct type.
+// 1. the list of declared methods of a named type; or
+// 2. the list of all methods (method set) of an interface type; or
+// 3. the list of fields of a struct type.
//
// The earlier index entries are the indices of the embedded struct fields
// traversed to get to the found entry, starting at depth 0.
@@ -35,12 +35,12 @@ import (
// If no entry is found, a nil object is returned. In this case, the returned
// index and indirect values have the following meaning:
//
-// - If index != nil, the index sequence points to an ambiguous entry
-// (the same name appeared more than once at the same embedding level).
+// - If index != nil, the index sequence points to an ambiguous entry
+// (the same name appeared more than once at the same embedding level).
//
-// - If indirect is set, a method with a pointer receiver type was found
-// but there was no pointer on the path from the actual receiver type to
-// the method's formal receiver base type, nor was the receiver addressable.
+// - If indirect is set, a method with a pointer receiver type was found
+// but there was no pointer on the path from the actual receiver type to
+// the method's formal receiver base type, nor was the receiver addressable.
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
if T == nil {
panic("LookupFieldOrMethod on nil type")
diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go
index ee63214407..c820a29fad 100644
--- a/src/cmd/compile/internal/types2/selection.go
+++ b/src/cmd/compile/internal/types2/selection.go
@@ -92,9 +92,9 @@ func (s *Selection) Type() Type {
// The last index entry is the field or method index of the type declaring f;
// either:
//
-// 1) the list of declared methods of a named type; or
-// 2) the list of methods of an interface type; or
-// 3) the list of fields of a struct type.
+// 1. the list of declared methods of a named type; or
+// 2. the list of methods of an interface type; or
+// 3. the list of fields of a struct type.
//
// The earlier index entries are the indices of the embedded fields implicitly
// traversed to get from (the type of) x to f, starting at embedding depth 0.
@@ -111,6 +111,7 @@ func (s *Selection) String() string { return SelectionString(s, nil) }
// package-level objects, and may be nil.
//
// Examples:
+//
// "field (T) f int"
// "method (T) f(X) Y"
// "method expr (T) f(X) Y"
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 7a34b6474c..f530849a9d 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -24,19 +24,19 @@ type Sizes interface {
// StdSizes is a convenience type for creating commonly used Sizes.
// It makes the following simplifying assumptions:
//
-// - The size of explicitly sized basic types (int16, etc.) is the
-// specified size.
-// - The size of strings and interfaces is 2*WordSize.
-// - The size of slices is 3*WordSize.
-// - The size of an array of n elements corresponds to the size of
-// a struct of n consecutive fields of the array's element type.
-// - The size of a struct is the offset of the last field plus that
-// field's size. As with all element types, if the struct is used
-// in an array its size must first be aligned to a multiple of the
-// struct's alignment.
-// - All other types have size WordSize.
-// - Arrays and structs are aligned per spec definition; all other
-// types are naturally aligned with a maximum alignment MaxAlign.
+// - The size of explicitly sized basic types (int16, etc.) is the
+// specified size.
+// - The size of strings and interfaces is 2*WordSize.
+// - The size of slices is 3*WordSize.
+// - The size of an array of n elements corresponds to the size of
+// a struct of n consecutive fields of the array's element type.
+// - The size of a struct is the offset of the last field plus that
+// field's size. As with all element types, if the struct is used
+// in an array its size must first be aligned to a multiple of the
+// struct's alignment.
+// - All other types have size WordSize.
+// - Arrays and structs are aligned per spec definition; all other
+// types are naturally aligned with a maximum alignment MaxAlign.
//
// *StdSizes implements Sizes.
type StdSizes struct {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue52031.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue52031.go
new file mode 100644
index 0000000000..448a550b25
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue52031.go
@@ -0,0 +1,33 @@
+// -lang=go1.12
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type resultFlags uint
+
+// Example from #52031.
+//
+// The following shifts should not produce errors on Go < 1.13, as their
+// untyped constant operands are representable by type uint.
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Invalid cases.
+var x int = 1
+var _ = (8 << x /* ERROR "signed shift count .* requires go1.13 or later" */)
+
+const _ = (1 << 1.2 /* ERROR "truncated to uint" */)
+
+var y float64
+var _ = (1 << y /* ERROR "must be integer" */)
diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go
index 3d82a37ab8..97791324e1 100644
--- a/src/cmd/compile/internal/types2/typeterm.go
+++ b/src/cmd/compile/internal/types2/typeterm.go
@@ -6,10 +6,10 @@ package types2
// A term describes elementary type sets:
//
-// ∅: (*term)(nil) == ∅ // set of no types (empty set)
-// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
-// T: &term{false, T} == {T} // set of type T
-// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
type term struct {
tilde bool // valid if typ != nil
typ Type
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 9b09e097fa..c44d934f21 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -242,6 +242,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node {
// check assign type list to
// an expression list. called in
+//
// expr-list = func()
func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
if len(nl) != nr.NumFields() {
@@ -273,6 +274,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
// check assign expression list to
// an expression list. called in
+//
// expr-list = expr-list
func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
// cannot happen: should have been rejected during type checking
@@ -455,17 +457,18 @@ func readsMemory(n ir.Node) bool {
}
// expand append(l1, l2...) to
-// init {
-// s := l1
-// n := len(s) + len(l2)
-// // Compare as uint so growslice can panic on overflow.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(s, n)
-// }
-// s = s[:n]
-// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-// }
-// s
+//
+// init {
+// s := l1
+// n := len(s) + len(l2)
+// // Compare as uint so growslice can panic on overflow.
+// if uint(n) > uint(cap(s)) {
+// s = growslice(s, n)
+// }
+// s = s[:n]
+// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+// }
+// s
//
// l2 is allowed to be a string.
func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
@@ -597,32 +600,33 @@ func isAppendOfMake(n ir.Node) bool {
}
// extendSlice rewrites append(l1, make([]T, l2)...) to
-// init {
-// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
-// } else {
-// panicmakeslicelen()
-// }
-// s := l1
-// n := len(s) + l2
-// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
-// // cap is a positive int and n can become negative when len(s) + l2
-// // overflows int. Interpreting n when negative as uint makes it larger
-// // than cap(s). growslice will check the int n arg and panic if n is
-// // negative. This prevents the overflow from being undetected.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(T, s, n)
-// }
-// s = s[:n]
-// lptr := &l1[0]
-// sptr := &s[0]
-// if lptr == sptr || !T.HasPointers() {
-// // growslice did not clear the whole underlying array (or did not get called)
-// hp := &s[len(l1)]
-// hn := l2 * sizeof(T)
-// memclr(hp, hn)
-// }
-// }
-// s
+//
+// init {
+// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
+// } else {
+// panicmakeslicelen()
+// }
+// s := l1
+// n := len(s) + l2
+// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+// // cap is a positive int and n can become negative when len(s) + l2
+// // overflows int. Interpreting n when negative as uint makes it larger
+// // than cap(s). growslice will check the int n arg and panic if n is
+// // negative. This prevents the overflow from being undetected.
+// if uint(n) > uint(cap(s)) {
+// s = growslice(T, s, n)
+// }
+// s = s[:n]
+// lptr := &l1[0]
+// sptr := &s[0]
+// if lptr == sptr || !T.HasPointers() {
+// // growslice did not clear the whole underlying array (or did not get called)
+// hp := &s[len(l1)]
+// hn := l2 * sizeof(T)
+// memclr(hp, hn)
+// }
+// }
+// s
func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 7ec5494d99..d7b553ed0c 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -26,19 +26,19 @@ import (
//
// For race detector, expand append(src, a [, b]* ) to
//
-// init {
-// s := src
-// const argc = len(args) - 1
-// if cap(s) - len(s) < argc {
-// s = growslice(s, len(s)+argc)
-// }
-// n := len(s)
-// s = s[:n+argc]
-// s[n] = a
-// s[n+1] = b
-// ...
-// }
-// s
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, len(s)+argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
if !ir.SameSafeExpr(dst, n.Args[0]) {
n.Args[0] = safeExpr(n.Args[0], init)
diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go
index 625e216050..993f1392aa 100644
--- a/src/cmd/compile/internal/walk/compare.go
+++ b/src/cmd/compile/internal/walk/compare.go
@@ -16,7 +16,8 @@ import (
)
// The result of walkCompare MUST be assigned back to n, e.g.
-// n.Left = walkCompare(n.Left, init)
+//
+// n.Left = walkCompare(n.Left, init)
func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
return walkCompareInterface(n, init)
@@ -404,7 +405,8 @@ func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
}
// The result of finishCompare MUST be assigned back to n, e.g.
-// n.Left = finishCompare(n.Left, x, r, init)
+//
+// n.Left = finishCompare(n.Left, x, r, init)
func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck.Expr(r)
r = typecheck.Conv(r, n.Type())
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 4c1e7adddd..26a23c4d09 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -20,7 +20,8 @@ import (
)
// The result of walkExpr MUST be assigned back to n, e.g.
-// n.Left = walkExpr(n.Left, init)
+//
+// n.Left = walkExpr(n.Left, init)
func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return n
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index cc37f95764..80806478be 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -237,7 +237,8 @@ func isaddrokay(n ir.Node) bool {
// If the original argument n is not okay, addrTemp creates a tmp, emits
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
-// n.Left = o.addrTemp(n.Left)
+//
+// n.Left = o.addrTemp(n.Left)
func (o *orderState) addrTemp(n ir.Node) ir.Node {
if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
@@ -316,8 +317,10 @@ func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
// Returns a bool that signals if a modification was made.
//
// For:
-// x = m[string(k)]
-// x = m[T1{... Tn{..., string(k), ...}]
+//
+// x = m[string(k)]
+// x = m[T1{... Tn{..., string(k), ...}]
+//
// where k is []byte, T1 to Tn is a nesting of struct and array literals,
// the allocation of backing bytes for the string can be avoided
// by reusing the []byte backing array. These are special cases
@@ -400,9 +403,12 @@ func (o *orderState) stmtList(l ir.Nodes) {
}
// orderMakeSliceCopy matches the pattern:
-// m = OMAKESLICE([]T, x); OCOPY(m, s)
+//
+// m = OMAKESLICE([]T, x); OCOPY(m, s)
+//
// and rewrites it to:
-// m = OMAKESLICECOPY([]T, x, s); nil
+//
+// m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []ir.Node) {
if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
return
@@ -473,7 +479,8 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
// exprInPlace orders the side effects in *np and
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
-// n.Left = o.exprInPlace(n.Left)
+//
+// n.Left = o.exprInPlace(n.Left)
func (o *orderState) exprInPlace(n ir.Node) ir.Node {
var order orderState
order.free = o.free
@@ -489,7 +496,9 @@ func (o *orderState) exprInPlace(n ir.Node) ir.Node {
// orderStmtInPlace orders the side effects of the single statement *np
// and replaces it with the resulting statement list.
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
-// n.Left = orderStmtInPlace(n.Left)
+//
+// n.Left = orderStmtInPlace(n.Left)
+//
// free is a map that can be used to obtain temporary variables by type.
func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
var order orderState
@@ -1087,7 +1096,8 @@ func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
// Otherwise lhs == nil. (When lhs != nil it may be possible
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
-// n.Left = o.expr(n.Left, lhs)
+//
+// n.Left = o.expr(n.Left, lhs)
func (o *orderState) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
@@ -1451,10 +1461,14 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
// The caller should order the right-hand side of the assignment before calling order.as2func.
// It rewrites,
+//
// a, b, a = ...
+//
// as
+//
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
+//
// This is necessary to ensure left to right assignment order.
func (o *orderState) as2func(n *ir.AssignListStmt) {
results := n.Rhs[0].Type()
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index f09e916546..8a42dbf777 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -10,7 +10,8 @@ import (
)
// The result of walkStmt MUST be assigned back to n, e.g.
-// n.Left = walkStmt(n.Left)
+//
+// n.Left = walkStmt(n.Left)
func walkStmt(n ir.Node) ir.Node {
if n == nil {
return n
@@ -84,6 +85,7 @@ func walkStmt(n ir.Node) ir.Node {
ir.OFALL,
ir.OGOTO,
ir.OLABEL,
+ ir.OJUMPTABLE,
ir.ODCL,
ir.ODCLCONST,
ir.ODCLTYPE,
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
index 3705c5b192..5067d5eb49 100644
--- a/src/cmd/compile/internal/walk/switch.go
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -11,6 +11,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
@@ -66,6 +67,7 @@ func walkSwitchExpr(sw *ir.SwitchStmt) {
base.Pos = lno
s := exprSwitch{
+ pos: lno,
exprname: cond,
}
@@ -112,6 +114,7 @@ func walkSwitchExpr(sw *ir.SwitchStmt) {
// An exprSwitch walks an expression switch.
type exprSwitch struct {
+ pos src.XPos
exprname ir.Node // value being switched on
done ir.Nodes
@@ -182,17 +185,59 @@ func (s *exprSwitch) flush() {
}
runs = append(runs, cc[start:])
- // Perform two-level binary search.
- binarySearch(len(runs), &s.done,
- func(i int) ir.Node {
- return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(runs[i-1])))
- },
- func(i int, nif *ir.IfStmt) {
- run := runs[i]
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(run)))
- s.search(run, &nif.Body)
- },
- )
+ if len(runs) == 1 {
+ s.search(runs[0], &s.done)
+ return
+ }
+ // We have strings of more than one length. Generate an
+ // outer switch which switches on the length of the string
+ // and an inner switch in each case which resolves all the
+ // strings of the same length. The code looks something like this:
+
+ // goto outerLabel
+ // len5:
+ // ... search among length 5 strings ...
+ // goto endLabel
+ // len8:
+ // ... search among length 8 strings ...
+ // goto endLabel
+ // ... other lengths ...
+ // outerLabel:
+ // switch len(s) {
+ // case 5: goto len5
+ // case 8: goto len8
+ // ... other lengths ...
+ // }
+ // endLabel:
+
+ outerLabel := typecheck.AutoLabel(".s")
+ endLabel := typecheck.AutoLabel(".s")
+
+ // Jump around all the individual switches for each length.
+ s.done.Append(ir.NewBranchStmt(s.pos, ir.OGOTO, outerLabel))
+
+ var outer exprSwitch
+ outer.exprname = ir.NewUnaryExpr(s.pos, ir.OLEN, s.exprname)
+ outer.exprname.SetType(types.Types[types.TINT])
+
+ for _, run := range runs {
+ // Target label to jump to when we match this length.
+ label := typecheck.AutoLabel(".s")
+
+ // Search within this run of same-length strings.
+ pos := run[0].pos
+ s.done.Append(ir.NewLabelStmt(pos, label))
+ s.search(run, &s.done)
+ s.done.Append(ir.NewBranchStmt(pos, ir.OGOTO, endLabel))
+
+ // Add length case to outer switch.
+ cas := ir.NewBasicLit(pos, constant.MakeInt64(runLen(run)))
+ jmp := ir.NewBranchStmt(pos, ir.OGOTO, label)
+ outer.Add(pos, cas, jmp)
+ }
+ s.done.Append(ir.NewLabelStmt(s.pos, outerLabel))
+ outer.Emit(&s.done)
+ s.done.Append(ir.NewLabelStmt(s.pos, endLabel))
return
}
@@ -223,6 +268,9 @@ func (s *exprSwitch) flush() {
}
func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
+ if s.tryJumpTable(cc, out) {
+ return
+ }
binarySearch(len(cc), out,
func(i int) ir.Node {
return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi)
@@ -235,6 +283,48 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
)
}
+// Try to implement the clauses with a jump table. Returns true if successful.
+func (s *exprSwitch) tryJumpTable(cc []exprClause, out *ir.Nodes) bool {
+ const go119UseJumpTables = true
+ const minCases = 8 // have at least minCases cases in the switch
+ const minDensity = 4 // use at least 1 out of every minDensity entries
+
+ if !go119UseJumpTables || base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable {
+ return false
+ }
+ if len(cc) < minCases {
+ return false // not enough cases for it to be worth it
+ }
+ if cc[0].lo.Val().Kind() != constant.Int {
+ return false // e.g. float
+ }
+ if s.exprname.Type().Size() > int64(types.PtrSize) {
+ return false // 64-bit switches on 32-bit archs
+ }
+ min := cc[0].lo.Val()
+ max := cc[len(cc)-1].hi.Val()
+ width := constant.BinaryOp(constant.BinaryOp(max, token.SUB, min), token.ADD, constant.MakeInt64(1))
+ limit := constant.MakeInt64(int64(len(cc)) * minDensity)
+ if constant.Compare(width, token.GTR, limit) {
+ // We disable jump tables if we use less than a minimum fraction of the entries.
+ // i.e. for switch x {case 0: case 1000: case 2000:} we don't want to use a jump table.
+ return false
+ }
+ jt := ir.NewJumpTableStmt(base.Pos, s.exprname)
+ for _, c := range cc {
+ jmp := c.jmp.(*ir.BranchStmt)
+ if jmp.Op() != ir.OGOTO || jmp.Label == nil {
+ panic("bad switch case body")
+ }
+ for i := c.lo.Val(); constant.Compare(i, token.LEQ, c.hi.Val()); i = constant.BinaryOp(i, token.ADD, constant.MakeInt64(1)) {
+ jt.Cases = append(jt.Cases, i)
+ jt.Targets = append(jt.Targets, jmp.Label)
+ }
+ }
+ out.Append(jt)
+ return true
+}
+
func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
if c.hi != c.lo {
@@ -540,6 +630,7 @@ func (s *typeSwitch) flush() {
}
cc = merged
+ // TODO: figure out if we could use a jump table using some low bits of the type hashes.
binarySearch(len(cc), &s.done,
func(i int) ir.Node {
return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(int64(cc[i-1].hash)))
@@ -562,7 +653,7 @@ func (s *typeSwitch) flush() {
// then cases before i will be tested; otherwise, cases i and later.
//
// leaf(i, nif) should setup nif (an OIF node) to test case i. In
-// particular, it should set nif.Left and nif.Nbody.
+// particular, it should set nif.Cond and nif.Body.
func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
const binarySearchMin = 4 // minimum number of cases for binary search
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index 32e29f347b..378100b162 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -106,7 +106,9 @@ func moveByType(t *types.Type) obj.As {
}
// opregreg emits instructions for
-// dest := dest(To) op src(From)
+//
+// dest := dest(To) op src(From)
+//
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
@@ -725,7 +727,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// caller's SP is the address of the first arg
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize // 0 on 386, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/cover/cover.go b/src/cmd/cover/cover.go
index 9c8529f7eb..86ef128f2c 100644
--- a/src/cmd/cover/cover.go
+++ b/src/cmd/cover/cover.go
@@ -377,7 +377,7 @@ func (f *File) newCounter(start, end token.Pos, numStmt int) string {
// S1
// if cond {
// S2
-// }
+// }
// S3
//
// counters will be added before S1 and before S3. The block containing S2
diff --git a/src/cmd/cover/cover_test.go b/src/cmd/cover/cover_test.go
index 8bd31514a0..28be231121 100644
--- a/src/cmd/cover/cover_test.go
+++ b/src/cmd/cover/cover_test.go
@@ -162,8 +162,8 @@ func buildCover(t *testing.T) {
// Run this shell script, but do it in Go so it can be run by "go test".
//
// replace the word LINE with the line number < testdata/test.go > testdata/test_line.go
-// go build -o testcover
-// testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go
+// go build -o testcover
+// testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go
// go run ./testdata/main.go ./testdata/test.go
func TestCover(t *testing.T) {
t.Parallel()
diff --git a/src/cmd/cover/doc.go b/src/cmd/cover/doc.go
index e2c849419a..e091ce9e30 100644
--- a/src/cmd/cover/doc.go
+++ b/src/cmd/cover/doc.go
@@ -19,6 +19,7 @@ must be applied to the output of cgo preprocessing, not the input,
because cover deletes comments that are significant to cgo.
For usage information, please see:
+
go help testflag
go tool cover -help
*/
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index db2ac1f2a6..bbaf595421 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -26,7 +26,7 @@ import (
// The usual variables.
var (
goarch string
- gobin string
+ gorootBin string
gohostarch string
gohostos string
goos string
@@ -112,6 +112,7 @@ func xinit() {
fatalf("$GOROOT must be set")
}
goroot = filepath.Clean(b)
+ gorootBin = pathf("%s/bin", goroot)
b = os.Getenv("GOROOT_FINAL")
if b == "" {
@@ -119,12 +120,6 @@ func xinit() {
}
goroot_final = b
- b = os.Getenv("GOBIN")
- if b == "" {
- b = pathf("%s/bin", goroot)
- }
- gobin = b
-
b = os.Getenv("GOOS")
if b == "" {
b = gohostos
@@ -241,9 +236,19 @@ func xinit() {
// make.bash really does start from a clean slate.
os.Setenv("GOCACHE", pathf("%s/pkg/obj/go-build", goroot))
+ // Set GOBIN to GOROOT/bin. The meaning of GOBIN has drifted over time
+ // (see https://go.dev/issue/3269, https://go.dev/cl/183058,
+ // https://go.dev/issue/31576). Binaries installed by 'dist' should
+ // always go to GOROOT/bin, so set GOBIN explicitly here.
+ os.Setenv("GOBIN", gorootBin)
+
// Make the environment more predictable.
os.Setenv("LANG", "C")
os.Setenv("LANGUAGE", "en_US.UTF8")
+ os.Unsetenv("GO111MODULE")
+ os.Setenv("GOENV", "off")
+ os.Unsetenv("GOFLAGS")
+ os.Setenv("GOWORK", "off")
workdir = xworkdir()
if err := ioutil.WriteFile(pathf("%s/go.mod", workdir), []byte("module bootstrap"), 0666); err != nil {
@@ -490,16 +495,6 @@ func setup() {
xremove(pathf("%s/bin/%s", goroot, old))
}
- // If $GOBIN is set and has a Go compiler, it must be cleaned.
- for _, char := range "56789" {
- if isfile(pathf("%s/%c%s", gobin, char, "g")) {
- for _, old := range oldtool {
- xremove(pathf("%s/%s", gobin, old))
- }
- break
- }
- }
-
// For release, make sure excluded things are excluded.
goversion := findgoversion()
if strings.HasPrefix(goversion, "release.") || (strings.HasPrefix(goversion, "go") && !strings.Contains(goversion, "beta")) {
@@ -1126,8 +1121,8 @@ func clean() {
// The env command prints the default environment.
func cmdenv() {
path := flag.Bool("p", false, "emit updated PATH")
- plan9 := flag.Bool("9", false, "emit plan 9 syntax")
- windows := flag.Bool("w", false, "emit windows syntax")
+ plan9 := flag.Bool("9", gohostos == "plan9", "emit plan 9 syntax")
+ windows := flag.Bool("w", gohostos == "windows", "emit windows syntax")
xflagparse(0)
format := "%s=\"%s\"\n"
@@ -1138,10 +1133,13 @@ func cmdenv() {
format = "set %s=%s\r\n"
}
+ xprintf(format, "GO111MODULE", "")
xprintf(format, "GOARCH", goarch)
- xprintf(format, "GOBIN", gobin)
+ xprintf(format, "GOBIN", gorootBin)
xprintf(format, "GOCACHE", os.Getenv("GOCACHE"))
xprintf(format, "GODEBUG", os.Getenv("GODEBUG"))
+ xprintf(format, "GOENV", "off")
+ xprintf(format, "GOFLAGS", "")
xprintf(format, "GOHOSTARCH", gohostarch)
xprintf(format, "GOHOSTOS", gohostos)
xprintf(format, "GOOS", goos)
@@ -1167,13 +1165,14 @@ func cmdenv() {
if goarch == "ppc64" || goarch == "ppc64le" {
xprintf(format, "GOPPC64", goppc64)
}
+ xprintf(format, "GOWORK", "off")
if *path {
sep := ":"
if gohostos == "windows" {
sep = ";"
}
- xprintf(format, "PATH", fmt.Sprintf("%s%s%s", gobin, sep, os.Getenv("PATH")))
+ xprintf(format, "PATH", fmt.Sprintf("%s%s%s", gorootBin, sep, os.Getenv("PATH")))
}
}
@@ -1226,7 +1225,9 @@ var toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link"}
// commands (like "go tool dist test" in run.bash) can rely on bug fixes
// made since Go 1.4, but this function cannot. In particular, the uses
// of os/exec in this function cannot assume that
+//
// cmd.Env = append(os.Environ(), "X=Y")
+//
// sets $X to Y in the command's environment. That guarantee was
// added after Go 1.4, and in fact in Go 1.4 it was typically the opposite:
// if $X was already present in os.Environ(), most systems preferred
@@ -1318,7 +1319,7 @@ func cmdbootstrap() {
gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now
goldflags = os.Getenv("GO_LDFLAGS") // we were using $BOOT_GO_LDFLAGS until now
goBootstrap := pathf("%s/go_bootstrap", tooldir)
- cmdGo := pathf("%s/go", gobin)
+ cmdGo := pathf("%s/go", gorootBin)
if debug {
run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full")
copyfile(pathf("%s/compile1", tooldir), pathf("%s/compile", tooldir), writeExec)
@@ -1457,7 +1458,7 @@ func cmdbootstrap() {
os.Setenv("GOOS", gohostos)
os.Setenv("GOARCH", gohostarch)
os.Setenv("CC", compilerEnvLookup(defaultcc, gohostos, gohostarch))
- goCmd(cmdGo, "build", "-o", pathf("%s/go_%s_%s_exec%s", gobin, goos, goarch, exe), wrapperPath)
+ goCmd(cmdGo, "build", "-o", pathf("%s/go_%s_%s_exec%s", gorootBin, goos, goarch, exe), wrapperPath)
// Restore environment.
// TODO(elias.naur): support environment variables in goCmd?
os.Setenv("GOOS", goos)
@@ -1681,26 +1682,26 @@ func banner() {
}
xprintf("---\n")
xprintf("Installed Go for %s/%s in %s\n", goos, goarch, goroot)
- xprintf("Installed commands in %s\n", gobin)
+ xprintf("Installed commands in %s\n", gorootBin)
if !xsamefile(goroot_final, goroot) {
// If the files are to be moved, don't check that gobin
// is on PATH; assume they know what they are doing.
} else if gohostos == "plan9" {
- // Check that gobin is bound before /bin.
+ // Check that GOROOT/bin is bound before /bin.
pid := strings.Replace(readfile("#c/pid"), " ", "", -1)
ns := fmt.Sprintf("/proc/%s/ns", pid)
- if !strings.Contains(readfile(ns), fmt.Sprintf("bind -b %s /bin", gobin)) {
- xprintf("*** You need to bind %s before /bin.\n", gobin)
+ if !strings.Contains(readfile(ns), fmt.Sprintf("bind -b %s /bin", gorootBin)) {
+ xprintf("*** You need to bind %s before /bin.\n", gorootBin)
}
} else {
- // Check that gobin appears in $PATH.
+ // Check that GOROOT/bin appears in $PATH.
pathsep := ":"
if gohostos == "windows" {
pathsep = ";"
}
- if !strings.Contains(pathsep+os.Getenv("PATH")+pathsep, pathsep+gobin+pathsep) {
- xprintf("*** You need to add %s to your PATH.\n", gobin)
+ if !strings.Contains(pathsep+os.Getenv("PATH")+pathsep, pathsep+gorootBin+pathsep) {
+ xprintf("*** You need to add %s to your PATH.\n", gorootBin)
}
}
diff --git a/src/cmd/dist/doc.go b/src/cmd/dist/doc.go
index a4e6aa5cbf..ad26aa2dc0 100644
--- a/src/cmd/dist/doc.go
+++ b/src/cmd/dist/doc.go
@@ -5,15 +5,17 @@
// Dist helps bootstrap, build, and test the Go distribution.
//
// Usage:
-// go tool dist [command]
+//
+// go tool dist [command]
//
// The commands are:
-// banner print installation banner
-// bootstrap rebuild everything
-// clean deletes all built files
-// env [-p] print environment (-p: include $PATH)
-// install [dir] install individual directory
-// list [-json] list all supported platforms
-// test [-h] run Go test(s)
-// version print Go version
+//
+// banner print installation banner
+// bootstrap rebuild everything
+// clean deletes all built files
+// env [-p] print environment (-p: include $PATH)
+// install [dir] install individual directory
+// list [-json] list all supported platforms
+// test [-h] run Go test(s)
+// version print Go version
package main
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 9118c133e5..ee521f81ba 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -100,8 +100,8 @@ func (t *tester) run() {
if goos == "windows" {
exeSuffix = ".exe"
}
- if _, err := os.Stat(filepath.Join(gobin, "go"+exeSuffix)); err == nil {
- os.Setenv("PATH", fmt.Sprintf("%s%c%s", gobin, os.PathListSeparator, os.Getenv("PATH")))
+ if _, err := os.Stat(filepath.Join(gorootBin, "go"+exeSuffix)); err == nil {
+ os.Setenv("PATH", fmt.Sprintf("%s%c%s", gorootBin, os.PathListSeparator, os.Getenv("PATH")))
}
cmd := exec.Command("go", "env", "CGO_ENABLED")
diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go
index ead4f722f6..5887ad3395 100644
--- a/src/cmd/doc/doc_test.go
+++ b/src/cmd/doc/doc_test.go
@@ -882,7 +882,9 @@ func TestDoc(t *testing.T) {
}
// Test the code to try multiple packages. Our test case is
+//
// go doc rand.Float64
+//
// This needs to find math/rand.Float64; however crypto/rand, which doesn't
// have the symbol, usually appears first in the directory listing.
func TestMultiplePackages(t *testing.T) {
@@ -939,11 +941,15 @@ func TestMultiplePackages(t *testing.T) {
}
// Test the code to look up packages when given two args. First test case is
+//
// go doc binary BigEndian
+//
// This needs to find encoding/binary.BigEndian, which means
// finding the package encoding/binary given only "binary".
// Second case is
+//
// go doc rand Float64
+//
// which again needs to find math/rand and not give up after crypto/rand,
// which has no such function.
func TestTwoArgLookup(t *testing.T) {
diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go
index dee5d7bbcd..3c45dd76df 100644
--- a/src/cmd/doc/main.go
+++ b/src/cmd/doc/main.go
@@ -5,20 +5,25 @@
// Doc (usually run as go doc) accepts zero, one or two arguments.
//
// Zero arguments:
+//
// go doc
+//
// Show the documentation for the package in the current directory.
//
// One argument:
+//
// go doc
// go doc [.]
// go doc [.][.]
// go doc [.][.]
+//
// The first item in this list that succeeds is the one whose documentation
// is printed. If there is a symbol but no package, the package in the current
// directory is chosen. However, if the argument begins with a capital
// letter it is always assumed to be a symbol in the current directory.
//
// Two arguments:
+//
// go doc [.]
//
// Show the documentation for the package, symbol, and method or field. The
diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go
index 49b68873b6..35f2eb24bf 100644
--- a/src/cmd/doc/pkg.go
+++ b/src/cmd/doc/pkg.go
@@ -25,8 +25,7 @@ import (
)
const (
- punchedCardWidth = 80 // These things just won't leave us alone.
- indentedWidth = punchedCardWidth - len(indent)
+ punchedCardWidth = 80
indent = " "
)
@@ -44,6 +43,14 @@ type Package struct {
buf pkgBuffer
}
+func (p *Package) ToText(w io.Writer, text, prefix, codePrefix string) {
+ d := p.doc.Parser().Parse(text)
+ pr := p.doc.Printer()
+ pr.TextPrefix = prefix
+ pr.TextCodePrefix = codePrefix
+ w.Write(pr.Text(d))
+}
+
// pkgBuffer is a wrapper for bytes.Buffer that prints a package clause the
// first time Write is called.
type pkgBuffer struct {
@@ -251,7 +258,7 @@ func (pkg *Package) emit(comment string, node ast.Node) {
}
if comment != "" && !showSrc {
pkg.newlines(1)
- doc.ToText(&pkg.buf, comment, indent, indent+indent, indentedWidth)
+ pkg.ToText(&pkg.buf, comment, indent, indent+indent)
pkg.newlines(2) // Blank line after comment to separate from next item.
} else {
pkg.newlines(1)
@@ -463,7 +470,7 @@ func joinStrings(ss []string) string {
// allDoc prints all the docs for the package.
func (pkg *Package) allDoc() {
pkg.Printf("") // Trigger the package clause; we know the package exists.
- doc.ToText(&pkg.buf, pkg.doc.Doc, "", indent, indentedWidth)
+ pkg.ToText(&pkg.buf, pkg.doc.Doc, "", indent)
pkg.newlines(1)
printed := make(map[*ast.GenDecl]bool)
@@ -523,7 +530,7 @@ func (pkg *Package) allDoc() {
func (pkg *Package) packageDoc() {
pkg.Printf("") // Trigger the package clause; we know the package exists.
if !short {
- doc.ToText(&pkg.buf, pkg.doc.Doc, "", indent, indentedWidth)
+ pkg.ToText(&pkg.buf, pkg.doc.Doc, "", indent)
pkg.newlines(1)
}
@@ -1033,9 +1040,9 @@ func (pkg *Package) printFieldDoc(symbol, fieldName string) bool {
if field.Doc != nil {
// To present indented blocks in comments correctly, process the comment as
// a unit before adding the leading // to each line.
- docBuf := bytes.Buffer{}
- doc.ToText(&docBuf, field.Doc.Text(), "", indent, indentedWidth)
- scanner := bufio.NewScanner(&docBuf)
+ docBuf := new(bytes.Buffer)
+ pkg.ToText(docBuf, field.Doc.Text(), "", indent)
+ scanner := bufio.NewScanner(docBuf)
for scanner.Scan() {
fmt.Fprintf(&pkg.buf, "%s// %s\n", indent, scanner.Bytes())
}
diff --git a/src/cmd/fix/cftype.go b/src/cmd/fix/cftype.go
index 27e4088aa9..e4988b1c62 100644
--- a/src/cmd/fix/cftype.go
+++ b/src/cmd/fix/cftype.go
@@ -24,9 +24,13 @@ var cftypeFix = fix{
}
// Old state:
-// type CFTypeRef unsafe.Pointer
+//
+// type CFTypeRef unsafe.Pointer
+//
// New state:
-// type CFTypeRef uintptr
+//
+// type CFTypeRef uintptr
+//
// and similar for other *Ref types.
// This fix finds nils initializing these types and replaces the nils with 0s.
func cftypefix(f *ast.File) bool {
diff --git a/src/cmd/fix/doc.go b/src/cmd/fix/doc.go
index 0570169576..062eb79285 100644
--- a/src/cmd/fix/doc.go
+++ b/src/cmd/fix/doc.go
@@ -8,6 +8,7 @@ newer ones. After you update to a new Go release, fix helps make
the necessary changes to your programs.
Usage:
+
go tool fix [-r name,...] [path ...]
Without an explicit path, fix reads standard input and writes the
@@ -30,7 +31,7 @@ Fix prints the full list of fixes it can apply in its help output;
to see them, run go tool fix -help.
Fix does not make backup copies of the files that it edits.
-Instead, use a version control system's ``diff'' functionality to inspect
+Instead, use a version control system's “diff” functionality to inspect
the changes that fix makes before committing them.
*/
package main
diff --git a/src/cmd/fix/egltype.go b/src/cmd/fix/egltype.go
index cb0f7a73de..a096db6665 100644
--- a/src/cmd/fix/egltype.go
+++ b/src/cmd/fix/egltype.go
@@ -22,9 +22,13 @@ var eglFixDisplay = fix{
}
// Old state:
-// type EGLDisplay unsafe.Pointer
+//
+// type EGLDisplay unsafe.Pointer
+//
// New state:
-// type EGLDisplay uintptr
+//
+// type EGLDisplay uintptr
+//
// This fix finds nils initializing these types and replaces the nils with 0s.
func eglfixDisp(f *ast.File) bool {
return typefix(f, func(s string) bool {
@@ -41,9 +45,13 @@ var eglFixConfig = fix{
}
// Old state:
-// type EGLConfig unsafe.Pointer
+//
+// type EGLConfig unsafe.Pointer
+//
// New state:
-// type EGLConfig uintptr
+//
+// type EGLConfig uintptr
+//
// This fix finds nils initializing these types and replaces the nils with 0s.
func eglfixConfig(f *ast.File) bool {
return typefix(f, func(s string) bool {
diff --git a/src/cmd/fix/jnitype.go b/src/cmd/fix/jnitype.go
index 29abe0f007..111be8e70c 100644
--- a/src/cmd/fix/jnitype.go
+++ b/src/cmd/fix/jnitype.go
@@ -21,9 +21,13 @@ var jniFix = fix{
}
// Old state:
-// type jobject *_jobject
+//
+// type jobject *_jobject
+//
// New state:
-// type jobject uintptr
+//
+// type jobject uintptr
+//
// and similar for subtypes of jobject.
// This fix finds nils initializing these types and replaces the nils with 0s.
func jnifix(f *ast.File) bool {
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index f9d78b59e3..6fdb4f93a3 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -9,71 +9,69 @@
//
// Usage:
//
-// go [arguments]
+// go [arguments]
//
// The commands are:
//
-// bug start a bug report
-// build compile packages and dependencies
-// clean remove object files and cached files
-// doc show documentation for package or symbol
-// env print Go environment information
-// fix update packages to use new APIs
-// fmt gofmt (reformat) package sources
-// generate generate Go files by processing source
-// get add dependencies to current module and install them
-// install compile and install packages and dependencies
-// list list packages or modules
-// mod module maintenance
-// work workspace maintenance
-// run compile and run Go program
-// test test packages
-// tool run specified go tool
-// version print Go version
-// vet report likely mistakes in packages
+// bug start a bug report
+// build compile packages and dependencies
+// clean remove object files and cached files
+// doc show documentation for package or symbol
+// env print Go environment information
+// fix update packages to use new APIs
+// fmt gofmt (reformat) package sources
+// generate generate Go files by processing source
+// get add dependencies to current module and install them
+// install compile and install packages and dependencies
+// list list packages or modules
+// mod module maintenance
+// work workspace maintenance
+// run compile and run Go program
+// test test packages
+// tool run specified go tool
+// version print Go version
+// vet report likely mistakes in packages
//
// Use "go help " for more information about a command.
//
// Additional help topics:
//
-// buildconstraint build constraints
-// buildmode build modes
-// c calling between Go and C
-// cache build and test caching
-// environment environment variables
-// filetype file types
-// go.mod the go.mod file
-// gopath GOPATH environment variable
-// gopath-get legacy GOPATH go get
-// goproxy module proxy protocol
-// importpath import path syntax
-// modules modules, module versions, and more
-// module-get module-aware go get
-// module-auth module authentication using go.sum
-// packages package lists and patterns
-// private configuration for downloading non-public code
-// testflag testing flags
-// testfunc testing functions
-// vcs controlling version control with GOVCS
+// buildconstraint build constraints
+// buildmode build modes
+// c calling between Go and C
+// cache build and test caching
+// environment environment variables
+// filetype file types
+// go.mod the go.mod file
+// gopath GOPATH environment variable
+// gopath-get legacy GOPATH go get
+// goproxy module proxy protocol
+// importpath import path syntax
+// modules modules, module versions, and more
+// module-get module-aware go get
+// module-auth module authentication using go.sum
+// packages package lists and patterns
+// private configuration for downloading non-public code
+// testflag testing flags
+// testfunc testing functions
+// vcs controlling version control with GOVCS
//
// Use "go help " for more information about that topic.
//
-//
-// Start a bug report
+// # Start a bug report
//
// Usage:
//
-// go bug
+// go bug
//
// Bug opens the default browser and starts a new bug report.
// The report includes useful system information.
//
-//
-// Compile packages and dependencies
+// # Compile packages and dependencies
//
// Usage:
//
-// go build [-o output] [build flags] [packages]
+// go build [-o output] [build flags] [packages]
//
// Build compiles the packages named by the import paths,
// along with their dependencies, but it does not install the results.
@@ -105,110 +103,112 @@
// The build flags are shared by the build, clean, get, install, list, run,
// and test commands:
//
-// -a
-// force rebuilding of packages that are already up-to-date.
-// -n
-// print the commands but do not run them.
-// -p n
-// the number of programs, such as build commands or
-// test binaries, that can be run in parallel.
-// The default is GOMAXPROCS, normally the number of CPUs available.
-// -race
-// enable data race detection.
-// Supported only on linux/amd64, freebsd/amd64, darwin/amd64, darwin/arm64, windows/amd64,
-// linux/ppc64le and linux/arm64 (only for 48-bit VMA).
-// -msan
-// enable interoperation with memory sanitizer.
-// Supported only on linux/amd64, linux/arm64
-// and only with Clang/LLVM as the host C compiler.
-// On linux/arm64, pie build mode will be used.
-// -asan
-// enable interoperation with address sanitizer.
-// Supported only on linux/arm64, linux/amd64.
-// -v
-// print the names of packages as they are compiled.
-// -work
-// print the name of the temporary work directory and
-// do not delete it when exiting.
-// -x
-// print the commands.
+// -a
+// force rebuilding of packages that are already up-to-date.
+// -n
+// print the commands but do not run them.
+// -p n
+// the number of programs, such as build commands or
+// test binaries, that can be run in parallel.
+// The default is GOMAXPROCS, normally the number of CPUs available.
+// -race
+// enable data race detection.
+// Supported only on linux/amd64, freebsd/amd64, darwin/amd64, darwin/arm64, windows/amd64,
+// linux/ppc64le and linux/arm64 (only for 48-bit VMA).
+// -msan
+// enable interoperation with memory sanitizer.
+// Supported only on linux/amd64, linux/arm64
+// and only with Clang/LLVM as the host C compiler.
+// On linux/arm64, pie build mode will be used.
+// -asan
+// enable interoperation with address sanitizer.
+// Supported only on linux/arm64, linux/amd64.
+// -v
+// print the names of packages as they are compiled.
+// -work
+// print the name of the temporary work directory and
+// do not delete it when exiting.
+// -x
+// print the commands.
//
-// -asmflags '[pattern=]arg list'
-// arguments to pass on each go tool asm invocation.
-// -buildmode mode
-// build mode to use. See 'go help buildmode' for more.
-// -buildvcs
-// Whether to stamp binaries with version control information. By default,
-// version control information is stamped into a binary if the main package
-// and the main module containing it are in the repository containing the
-// current directory (if there is a repository). Use -buildvcs=false to
-// omit version control information.
-// -compiler name
-// name of compiler to use, as in runtime.Compiler (gccgo or gc).
-// -gccgoflags '[pattern=]arg list'
-// arguments to pass on each gccgo compiler/linker invocation.
-// -gcflags '[pattern=]arg list'
-// arguments to pass on each go tool compile invocation.
-// -installsuffix suffix
-// a suffix to use in the name of the package installation directory,
-// in order to keep output separate from default builds.
-// If using the -race flag, the install suffix is automatically set to race
-// or, if set explicitly, has _race appended to it. Likewise for the -msan
-// and -asan flags. Using a -buildmode option that requires non-default compile
-// flags has a similar effect.
-// -ldflags '[pattern=]arg list'
-// arguments to pass on each go tool link invocation.
-// -linkshared
-// build code that will be linked against shared libraries previously
-// created with -buildmode=shared.
-// -mod mode
-// module download mode to use: readonly, vendor, or mod.
-// By default, if a vendor directory is present and the go version in go.mod
-// is 1.14 or higher, the go command acts as if -mod=vendor were set.
-// Otherwise, the go command acts as if -mod=readonly were set.
-// See https://golang.org/ref/mod#build-commands for details.
-// -modcacherw
-// leave newly-created directories in the module cache read-write
-// instead of making them read-only.
-// -modfile file
-// in module aware mode, read (and possibly write) an alternate go.mod
-// file instead of the one in the module root directory. A file named
-// "go.mod" must still be present in order to determine the module root
-// directory, but it is not accessed. When -modfile is specified, an
-// alternate go.sum file is also used: its path is derived from the
-// -modfile flag by trimming the ".mod" extension and appending ".sum".
-// -overlay file
-// read a JSON config file that provides an overlay for build operations.
-// The file is a JSON struct with a single field, named 'Replace', that
-// maps each disk file path (a string) to its backing file path, so that
-// a build will run as if the disk file path exists with the contents
-// given by the backing file paths, or as if the disk file path does not
-// exist if its backing file path is empty. Support for the -overlay flag
-// has some limitations: importantly, cgo files included from outside the
-// include path must be in the same directory as the Go package they are
-// included from, and overlays will not appear when binaries and tests are
-// run through go run and go test respectively.
-// -pkgdir dir
-// install and load all packages from dir instead of the usual locations.
-// For example, when building with a non-standard configuration,
-// use -pkgdir to keep generated packages in a separate location.
-// -tags tag,list
-// a comma-separated list of build tags to consider satisfied during the
-// build. For more information about build tags, see the description of
-// build constraints in the documentation for the go/build package.
-// (Earlier versions of Go used a space-separated list, and that form
-// is deprecated but still recognized.)
-// -trimpath
-// remove all file system paths from the resulting executable.
-// Instead of absolute file system paths, the recorded file names
-// will begin either a module path@version (when using modules),
-// or a plain import path (when using the standard library, or GOPATH).
-// -toolexec 'cmd args'
-// a program to use to invoke toolchain programs like vet and asm.
-// For example, instead of running asm, the go command will run
-// 'cmd args /path/to/asm '.
-// The TOOLEXEC_IMPORTPATH environment variable will be set,
-// matching 'go list -f {{.ImportPath}}' for the package being built.
+// -asmflags '[pattern=]arg list'
+// arguments to pass on each go tool asm invocation.
+// -buildmode mode
+// build mode to use. See 'go help buildmode' for more.
+// -buildvcs
+// Whether to stamp binaries with version control information
+// ("true", "false", or "auto"). By default ("auto"), version control
+// information is stamped into a binary if the main package, the main module
+// containing it, and the current directory are all in the same repository.
+// Use -buildvcs=false to always omit version control information, or
+// -buildvcs=true to error out if version control information is available but
+// cannot be included due to a missing tool or ambiguous directory structure.
+// -compiler name
+// name of compiler to use, as in runtime.Compiler (gccgo or gc).
+// -gccgoflags '[pattern=]arg list'
+// arguments to pass on each gccgo compiler/linker invocation.
+// -gcflags '[pattern=]arg list'
+// arguments to pass on each go tool compile invocation.
+// -installsuffix suffix
+// a suffix to use in the name of the package installation directory,
+// in order to keep output separate from default builds.
+// If using the -race flag, the install suffix is automatically set to race
+// or, if set explicitly, has _race appended to it. Likewise for the -msan
+// and -asan flags. Using a -buildmode option that requires non-default compile
+// flags has a similar effect.
+// -ldflags '[pattern=]arg list'
+// arguments to pass on each go tool link invocation.
+// -linkshared
+// build code that will be linked against shared libraries previously
+// created with -buildmode=shared.
+// -mod mode
+// module download mode to use: readonly, vendor, or mod.
+// By default, if a vendor directory is present and the go version in go.mod
+// is 1.14 or higher, the go command acts as if -mod=vendor were set.
+// Otherwise, the go command acts as if -mod=readonly were set.
+// See https://golang.org/ref/mod#build-commands for details.
+// -modcacherw
+// leave newly-created directories in the module cache read-write
+// instead of making them read-only.
+// -modfile file
+// in module aware mode, read (and possibly write) an alternate go.mod
+// file instead of the one in the module root directory. A file named
+// "go.mod" must still be present in order to determine the module root
+// directory, but it is not accessed. When -modfile is specified, an
+// alternate go.sum file is also used: its path is derived from the
+// -modfile flag by trimming the ".mod" extension and appending ".sum".
+// -overlay file
+// read a JSON config file that provides an overlay for build operations.
+// The file is a JSON struct with a single field, named 'Replace', that
+// maps each disk file path (a string) to its backing file path, so that
+// a build will run as if the disk file path exists with the contents
+// given by the backing file paths, or as if the disk file path does not
+// exist if its backing file path is empty. Support for the -overlay flag
+// has some limitations: importantly, cgo files included from outside the
+// include path must be in the same directory as the Go package they are
+// included from, and overlays will not appear when binaries and tests are
+// run through go run and go test respectively.
+// -pkgdir dir
+// install and load all packages from dir instead of the usual locations.
+// For example, when building with a non-standard configuration,
+// use -pkgdir to keep generated packages in a separate location.
+// -tags tag,list
+// a comma-separated list of build tags to consider satisfied during the
+// build. For more information about build tags, see the description of
+// build constraints in the documentation for the go/build package.
+// (Earlier versions of Go used a space-separated list, and that form
+// is deprecated but still recognized.)
+// -trimpath
+// remove all file system paths from the resulting executable.
+// Instead of absolute file system paths, the recorded file names
+// will begin either a module path@version (when using modules),
+// or a plain import path (when using the standard library, or GOPATH).
+// -toolexec 'cmd args'
+// a program to use to invoke toolchain programs like vet and asm.
+// For example, instead of running asm, the go command will run
+// 'cmd args /path/to/asm '.
+// The TOOLEXEC_IMPORTPATH environment variable will be set,
+// matching 'go list -f {{.ImportPath}}' for the package being built.
//
// The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a
// space-separated list of arguments to pass to an underlying tool
@@ -240,12 +240,11 @@
//
// See also: go install, go get, go clean.
//
-//
-// Remove object files and cached files
+// # Remove object files and cached files
//
// Usage:
//
-// go clean [clean flags] [build flags] [packages]
+// go clean [clean flags] [build flags] [packages]
//
// Clean removes object files from package source directories.
// The go command builds most objects in a temporary directory,
@@ -256,17 +255,17 @@
// clean removes the following files from each of the
// source directories corresponding to the import paths:
//
-// _obj/ old object directory, left from Makefiles
-// _test/ old test directory, left from Makefiles
-// _testmain.go old gotest file, left from Makefiles
-// test.out old test log, left from Makefiles
-// build.out old test log, left from Makefiles
-// *.[568ao] object files, left from Makefiles
+// _obj/ old object directory, left from Makefiles
+// _test/ old test directory, left from Makefiles
+// _testmain.go old gotest file, left from Makefiles
+// test.out old test log, left from Makefiles
+// build.out old test log, left from Makefiles
+// *.[568ao] object files, left from Makefiles
//
-// DIR(.exe) from go build
-// DIR.test(.exe) from go test -c
-// MAINFILE(.exe) from go build MAINFILE.go
-// *.so from SWIG
+// DIR(.exe) from go build
+// DIR.test(.exe) from go test -c
+// MAINFILE(.exe) from go build MAINFILE.go
+// *.so from SWIG
//
// In the list, DIR represents the final path element of the
// directory, and MAINFILE is the base name of any Go source
@@ -304,12 +303,11 @@
//
// For more about specifying packages, see 'go help packages'.
//
-//
-// Show documentation for package or symbol
+// # Show documentation for package or symbol
//
// Usage:
//
-// go doc [doc flags] [package|[package.]symbol[.methodOrField]]
+// go doc [doc flags] [package|[package.]symbol[.methodOrField]]
//
// Doc prints the documentation comments associated with the item identified by its
// arguments (a package, const, func, type, var, method, or struct field)
@@ -321,7 +319,7 @@
//
// Given no arguments, that is, when run as
//
-// go doc
+// go doc
//
// it prints the package documentation for the package in the current directory.
// If the package is a command (package main), the exported symbols of the package
@@ -332,10 +330,10 @@
// on what is installed in GOROOT and GOPATH, as well as the form of the argument,
// which is schematically one of these:
//
-// go doc
-// go doc [.]
-// go doc [.][.]
-// go doc [.][.]
+// go doc
+// go doc [.]
+// go doc [.][.]
+// go doc [.][.]
//
// The first item in this list matched by the argument is the one whose documentation
// is printed. (See the examples below.) However, if the argument starts with a capital
@@ -357,7 +355,7 @@
// When run with two arguments, the first is a package path (full path or suffix),
// and the second is a symbol, or symbol with method or struct field:
//
-// go doc [.]
+// go doc [.]
//
// In all forms, when matching symbols, lower-case letters in the argument match
// either case but upper-case letters match exactly. This means that there may be
@@ -365,68 +363,69 @@
// different cases. If this occurs, documentation for all matches is printed.
//
// Examples:
-// go doc
-// Show documentation for current package.
-// go doc Foo
-// Show documentation for Foo in the current package.
-// (Foo starts with a capital letter so it cannot match
-// a package path.)
-// go doc encoding/json
-// Show documentation for the encoding/json package.
-// go doc json
-// Shorthand for encoding/json.
-// go doc json.Number (or go doc json.number)
-// Show documentation and method summary for json.Number.
-// go doc json.Number.Int64 (or go doc json.number.int64)
-// Show documentation for json.Number's Int64 method.
-// go doc cmd/doc
-// Show package docs for the doc command.
-// go doc -cmd cmd/doc
-// Show package docs and exported symbols within the doc command.
-// go doc template.new
-// Show documentation for html/template's New function.
-// (html/template is lexically before text/template)
-// go doc text/template.new # One argument
-// Show documentation for text/template's New function.
-// go doc text/template new # Two arguments
-// Show documentation for text/template's New function.
//
-// At least in the current tree, these invocations all print the
-// documentation for json.Decoder's Decode method:
+// go doc
+// Show documentation for current package.
+// go doc Foo
+// Show documentation for Foo in the current package.
+// (Foo starts with a capital letter so it cannot match
+// a package path.)
+// go doc encoding/json
+// Show documentation for the encoding/json package.
+// go doc json
+// Shorthand for encoding/json.
+// go doc json.Number (or go doc json.number)
+// Show documentation and method summary for json.Number.
+// go doc json.Number.Int64 (or go doc json.number.int64)
+// Show documentation for json.Number's Int64 method.
+// go doc cmd/doc
+// Show package docs for the doc command.
+// go doc -cmd cmd/doc
+// Show package docs and exported symbols within the doc command.
+// go doc template.new
+// Show documentation for html/template's New function.
+// (html/template is lexically before text/template)
+// go doc text/template.new # One argument
+// Show documentation for text/template's New function.
+// go doc text/template new # Two arguments
+// Show documentation for text/template's New function.
//
-// go doc json.Decoder.Decode
-// go doc json.decoder.decode
-// go doc json.decode
-// cd go/src/encoding/json; go doc decode
+// At least in the current tree, these invocations all print the
+// documentation for json.Decoder's Decode method:
+//
+// go doc json.Decoder.Decode
+// go doc json.decoder.decode
+// go doc json.decode
+// cd go/src/encoding/json; go doc decode
//
// Flags:
-// -all
-// Show all the documentation for the package.
-// -c
-// Respect case when matching symbols.
-// -cmd
-// Treat a command (package main) like a regular package.
-// Otherwise package main's exported symbols are hidden
-// when showing the package's top-level documentation.
-// -short
-// One-line representation for each symbol.
-// -src
-// Show the full source code for the symbol. This will
-// display the full Go source of its declaration and
-// definition, such as a function definition (including
-// the body), type declaration or enclosing const
-// block. The output may therefore include unexported
-// details.
-// -u
-// Show documentation for unexported as well as exported
-// symbols, methods, and fields.
//
+// -all
+// Show all the documentation for the package.
+// -c
+// Respect case when matching symbols.
+// -cmd
+// Treat a command (package main) like a regular package.
+// Otherwise package main's exported symbols are hidden
+// when showing the package's top-level documentation.
+// -short
+// One-line representation for each symbol.
+// -src
+// Show the full source code for the symbol. This will
+// display the full Go source of its declaration and
+// definition, such as a function definition (including
+// the body), type declaration or enclosing const
+// block. The output may therefore include unexported
+// details.
+// -u
+// Show documentation for unexported as well as exported
+// symbols, methods, and fields.
//
-// Print Go environment information
+// # Print Go environment information
//
// Usage:
//
-// go env [-json] [-u] [-w] [var ...]
+// go env [-json] [-u] [-w] [var ...]
//
// Env prints Go environment information.
//
@@ -448,12 +447,11 @@
//
// For more about environment variables, see 'go help environment'.
//
-//
-// Update packages to use new APIs
+// # Update packages to use new APIs
//
// Usage:
//
-// go fix [-fix list] [packages]
+// go fix [-fix list] [packages]
//
// Fix runs the Go fix command on the packages named by the import paths.
//
@@ -468,12 +466,11 @@
//
// See also: go fmt, go vet.
//
-//
-// Gofmt (reformat) package sources
+// # Gofmt (reformat) package sources
//
// Usage:
//
-// go fmt [-n] [-x] [packages]
+// go fmt [-n] [-x] [packages]
//
// Fmt runs the command 'gofmt -l -w' on the packages named
// by the import paths. It prints the names of the files that are modified.
@@ -491,12 +488,11 @@
//
// See also: go fix, go vet.
//
-//
-// Generate Go files by processing source
+// # Generate Go files by processing source
//
// Usage:
//
-// go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
+// go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
//
// Generate runs commands described by directives within existing
// files. Those commands can run any process but the intent is to
@@ -508,7 +504,7 @@
// Go generate scans the file for directives, which are lines of
// the form,
//
-// //go:generate command argument...
+// //go:generate command argument...
//
// (note: no leading spaces and no space in "//go") where command
// is the generator to be run, corresponding to an executable file
@@ -531,25 +527,28 @@
// generated source should have a line that matches the following
// regular expression (in Go syntax):
//
-// ^// Code generated .* DO NOT EDIT\.$
+// ^// Code generated .* DO NOT EDIT\.$
//
// This line must appear before the first non-comment, non-blank
// text in the file.
//
// Go generate sets several variables when it runs the generator:
//
-// $GOARCH
-// The execution architecture (arm, amd64, etc.)
-// $GOOS
-// The execution operating system (linux, windows, etc.)
-// $GOFILE
-// The base name of the file.
-// $GOLINE
-// The line number of the directive in the source file.
-// $GOPACKAGE
-// The name of the package of the file containing the directive.
-// $DOLLAR
-// A dollar sign.
+// $GOARCH
+// The execution architecture (arm, amd64, etc.)
+// $GOOS
+// The execution operating system (linux, windows, etc.)
+// $GOFILE
+// The base name of the file.
+// $GOLINE
+// The line number of the directive in the source file.
+// $GOPACKAGE
+// The name of the package of the file containing the directive.
+// $GOROOT
+// The GOROOT directory for the 'go' command that invoked the
+// generator, containing the Go toolchain and standard library.
+// $DOLLAR
+// A dollar sign.
//
// Other than variable substitution and quoted-string evaluation, no
// special processing such as "globbing" is performed on the command
@@ -565,14 +564,14 @@
//
// A directive of the form,
//
-// //go:generate -command xxx args...
+// //go:generate -command xxx args...
//
// specifies, for the remainder of this source file only, that the
// string xxx represents the command identified by the arguments. This
// can be used to create aliases or to handle multiword generators.
// For example,
//
-// //go:generate -command foo go tool foo
+// //go:generate -command foo go tool foo
//
// specifies that the command "foo" represents the generator
// "go tool foo".
@@ -596,11 +595,11 @@
//
// Go generate accepts one specific flag:
//
-// -run=""
-// if non-empty, specifies a regular expression to select
-// directives whose full original source text (excluding
-// any trailing spaces and final newline) matches the
-// expression.
+// -run=""
+// if non-empty, specifies a regular expression to select
+// directives whose full original source text (excluding
+// any trailing spaces and final newline) matches the
+// expression.
//
// It also accepts the standard build flags including -v, -n, and -x.
// The -v flag prints the names of packages and files as they are
@@ -612,12 +611,11 @@
//
// For more about specifying packages, see 'go help packages'.
//
-//
-// Add dependencies to current module and install them
+// # Add dependencies to current module and install them
//
// Usage:
//
-// go get [-t] [-u] [-v] [build flags] [packages]
+// go get [-t] [-u] [-v] [build flags] [packages]
//
// Get resolves its command-line arguments to packages at specific module versions,
// updates go.mod to require those versions, and downloads source code into the
@@ -625,15 +623,15 @@
//
// To add a dependency for a package or upgrade it to its latest version:
//
-// go get example.com/pkg
+// go get example.com/pkg
//
// To upgrade or downgrade a package to a specific version:
//
-// go get example.com/pkg@v1.2.3
+// go get example.com/pkg@v1.2.3
//
// To remove a dependency on a module and downgrade modules that require it:
//
-// go get example.com/mod@none
+// go get example.com/mod@none
//
// See https://golang.org/ref/mod#go-get for details.
//
@@ -643,8 +641,8 @@
// 'go install' runs in module-aware mode and ignores the go.mod file in the
// current directory. For example:
//
-// go install example.com/pkg@v1.2.3
-// go install example.com/pkg@latest
+// go install example.com/pkg@v1.2.3
+// go install example.com/pkg@latest
//
// See 'go help install' or https://golang.org/ref/mod#go-install for details.
//
@@ -678,12 +676,11 @@
//
// See also: go build, go install, go clean, go mod.
//
-//
-// Compile and install packages and dependencies
+// # Compile and install packages and dependencies
//
// Usage:
//
-// go install [build flags] [packages]
+// go install [build flags] [packages]
//
// Install compiles and installs the packages named by the import paths.
//
@@ -738,12 +735,11 @@
//
// See also: go build, go get, go clean.
//
-//
-// List packages or modules
+// # List packages or modules
//
// Usage:
//
-// go list [-f format] [-json] [-m] [list flags] [build flags] [packages]
+// go list [-f format] [-json] [-m] [list flags] [build flags] [packages]
//
// List lists the named packages, one per line.
// The most commonly-used flags are -f and -json, which control the form
@@ -752,83 +748,83 @@
//
// The default output shows the package import path:
//
-// bytes
-// encoding/json
-// github.com/gorilla/mux
-// golang.org/x/net/html
+// bytes
+// encoding/json
+// github.com/gorilla/mux
+// golang.org/x/net/html
//
// The -f flag specifies an alternate format for the list, using the
// syntax of package template. The default output is equivalent
// to -f '{{.ImportPath}}'. The struct being passed to the template is:
//
-// type Package struct {
-// Dir string // directory containing package sources
-// ImportPath string // import path of package in dir
-// ImportComment string // path in import comment on package statement
-// Name string // package name
-// Doc string // package documentation string
-// Target string // install path
-// Shlib string // the shared library that contains this package (only set when -linkshared)
-// Goroot bool // is this package in the Go root?
-// Standard bool // is this package part of the standard Go library?
-// Stale bool // would 'go install' do anything for this package?
-// StaleReason string // explanation for Stale==true
-// Root string // Go root or Go path dir containing this package
-// ConflictDir string // this directory shadows Dir in $GOPATH
-// BinaryOnly bool // binary-only package (no longer supported)
-// ForTest string // package is only for use in named test
-// Export string // file containing export data (when using -export)
-// BuildID string // build ID of the compiled package (when using -export)
-// Module *Module // info about package's containing module, if any (can be nil)
-// Match []string // command-line patterns matching this package
-// DepOnly bool // package is only a dependency, not explicitly listed
+// type Package struct {
+// Dir string // directory containing package sources
+// ImportPath string // import path of package in dir
+// ImportComment string // path in import comment on package statement
+// Name string // package name
+// Doc string // package documentation string
+// Target string // install path
+// Shlib string // the shared library that contains this package (only set when -linkshared)
+// Goroot bool // is this package in the Go root?
+// Standard bool // is this package part of the standard Go library?
+// Stale bool // would 'go install' do anything for this package?
+// StaleReason string // explanation for Stale==true
+// Root string // Go root or Go path dir containing this package
+// ConflictDir string // this directory shadows Dir in $GOPATH
+// BinaryOnly bool // binary-only package (no longer supported)
+// ForTest string // package is only for use in named test
+// Export string // file containing export data (when using -export)
+// BuildID string // build ID of the compiled package (when using -export)
+// Module *Module // info about package's containing module, if any (can be nil)
+// Match []string // command-line patterns matching this package
+// DepOnly bool // package is only a dependency, not explicitly listed
//
-// // Source files
-// GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
-// CgoFiles []string // .go source files that import "C"
-// CompiledGoFiles []string // .go files presented to compiler (when using -compiled)
-// IgnoredGoFiles []string // .go source files ignored due to build constraints
-// IgnoredOtherFiles []string // non-.go source files ignored due to build constraints
-// CFiles []string // .c source files
-// CXXFiles []string // .cc, .cxx and .cpp source files
-// MFiles []string // .m source files
-// HFiles []string // .h, .hh, .hpp and .hxx source files
-// FFiles []string // .f, .F, .for and .f90 Fortran source files
-// SFiles []string // .s source files
-// SwigFiles []string // .swig files
-// SwigCXXFiles []string // .swigcxx files
-// SysoFiles []string // .syso object files to add to archive
-// TestGoFiles []string // _test.go files in package
-// XTestGoFiles []string // _test.go files outside package
+// // Source files
+// GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+// CgoFiles []string // .go source files that import "C"
+// CompiledGoFiles []string // .go files presented to compiler (when using -compiled)
+// IgnoredGoFiles []string // .go source files ignored due to build constraints
+// IgnoredOtherFiles []string // non-.go source files ignored due to build constraints
+// CFiles []string // .c source files
+// CXXFiles []string // .cc, .cxx and .cpp source files
+// MFiles []string // .m source files
+// HFiles []string // .h, .hh, .hpp and .hxx source files
+// FFiles []string // .f, .F, .for and .f90 Fortran source files
+// SFiles []string // .s source files
+// SwigFiles []string // .swig files
+// SwigCXXFiles []string // .swigcxx files
+// SysoFiles []string // .syso object files to add to archive
+// TestGoFiles []string // _test.go files in package
+// XTestGoFiles []string // _test.go files outside package
//
-// // Embedded files
-// EmbedPatterns []string // //go:embed patterns
-// EmbedFiles []string // files matched by EmbedPatterns
-// TestEmbedPatterns []string // //go:embed patterns in TestGoFiles
-// TestEmbedFiles []string // files matched by TestEmbedPatterns
-// XTestEmbedPatterns []string // //go:embed patterns in XTestGoFiles
-// XTestEmbedFiles []string // files matched by XTestEmbedPatterns
+// // Embedded files
+// EmbedPatterns []string // //go:embed patterns
+// EmbedFiles []string // files matched by EmbedPatterns
+// TestEmbedPatterns []string // //go:embed patterns in TestGoFiles
+// TestEmbedFiles []string // files matched by TestEmbedPatterns
+// XTestEmbedPatterns []string // //go:embed patterns in XTestGoFiles
+// XTestEmbedFiles []string // files matched by XTestEmbedPatterns
//
-// // Cgo directives
-// CgoCFLAGS []string // cgo: flags for C compiler
-// CgoCPPFLAGS []string // cgo: flags for C preprocessor
-// CgoCXXFLAGS []string // cgo: flags for C++ compiler
-// CgoFFLAGS []string // cgo: flags for Fortran compiler
-// CgoLDFLAGS []string // cgo: flags for linker
-// CgoPkgConfig []string // cgo: pkg-config names
+// // Cgo directives
+// CgoCFLAGS []string // cgo: flags for C compiler
+// CgoCPPFLAGS []string // cgo: flags for C preprocessor
+// CgoCXXFLAGS []string // cgo: flags for C++ compiler
+// CgoFFLAGS []string // cgo: flags for Fortran compiler
+// CgoLDFLAGS []string // cgo: flags for linker
+// CgoPkgConfig []string // cgo: pkg-config names
//
-// // Dependency information
-// Imports []string // import paths used by this package
-// ImportMap map[string]string // map from source import to ImportPath (identity entries omitted)
-// Deps []string // all (recursively) imported dependencies
-// TestImports []string // imports from TestGoFiles
-// XTestImports []string // imports from XTestGoFiles
+// // Dependency information
+// Imports []string // import paths used by this package
+// ImportMap map[string]string // map from source import to ImportPath (identity entries omitted)
+// Deps []string // all (recursively) imported dependencies
+// TestImports []string // imports from TestGoFiles
+// XTestImports []string // imports from XTestGoFiles
//
-// // Error information
-// Incomplete bool // this package or a dependency has an error
-// Error *PackageError // error loading package
-// DepsErrors []*PackageError // errors loading dependencies
-// }
+// // Error information
+// Incomplete bool // this package or a dependency has an error
+// Error *PackageError // error loading package
+// DepsErrors []*PackageError // errors loading dependencies
+// }
//
// Packages stored in vendor directories report an ImportPath that includes the
// path to the vendor directory (for example, "d/vendor/p" instead of "p"),
@@ -838,11 +834,11 @@
//
// The error information, if any, is
//
-// type PackageError struct {
-// ImportStack []string // shortest path from package named on command line to this one
-// Pos string // position of error (if present, file:line:col)
-// Err string // the error itself
-// }
+// type PackageError struct {
+// ImportStack []string // shortest path from package named on command line to this one
+// Pos string // position of error (if present, file:line:col)
+// Err string // the error itself
+// }
//
// The module information is a Module struct, defined in the discussion
// of list -m below.
@@ -851,19 +847,19 @@
//
// The template function "context" returns the build context, defined as:
//
-// type Context struct {
-// GOARCH string // target architecture
-// GOOS string // target operating system
-// GOROOT string // Go root
-// GOPATH string // Go path
-// CgoEnabled bool // whether cgo can be used
-// UseAllFiles bool // use files regardless of +build lines, file names
-// Compiler string // compiler to assume when computing target paths
-// BuildTags []string // build constraints to match in +build lines
-// ToolTags []string // toolchain-specific build constraints
-// ReleaseTags []string // releases the current release is compatible with
-// InstallSuffix string // suffix to use in the name of the install dir
-// }
+// type Context struct {
+// GOARCH string // target architecture
+// GOOS string // target operating system
+// GOROOT string // Go root
+// GOPATH string // Go path
+// CgoEnabled bool // whether cgo can be used
+// UseAllFiles bool // use files regardless of +build lines, file names
+// Compiler string // compiler to assume when computing target paths
+// BuildTags []string // build constraints to match in +build lines
+// ToolTags []string // toolchain-specific build constraints
+// ReleaseTags []string // releases the current release is compatible with
+// InstallSuffix string // suffix to use in the name of the install dir
+// }
//
// For more information about the meaning of these fields see the documentation
// for the go/build package's Context type.
@@ -930,25 +926,25 @@
// When listing modules, the -f flag still specifies a format template
// applied to a Go struct, but now a Module struct:
//
-// type Module struct {
-// Path string // module path
-// Version string // module version
-// Versions []string // available module versions (with -versions)
-// Replace *Module // replaced by this module
-// Time *time.Time // time version was created
-// Update *Module // available update, if any (with -u)
-// Main bool // is this the main module?
-// Indirect bool // is this module only an indirect dependency of main module?
-// Dir string // directory holding files for this module, if any
-// GoMod string // path to go.mod file used when loading this module, if any
-// GoVersion string // go version used in module
-// Retracted string // retraction information, if any (with -retracted or -u)
-// Error *ModuleError // error loading module
-// }
+// type Module struct {
+// Path string // module path
+// Version string // module version
+// Versions []string // available module versions (with -versions)
+// Replace *Module // replaced by this module
+// Time *time.Time // time version was created
+// Update *Module // available update, if any (with -u)
+// Main bool // is this the main module?
+// Indirect bool // is this module only an indirect dependency of main module?
+// Dir string // directory holding files for this module, if any
+// GoMod string // path to go.mod file used when loading this module, if any
+// GoVersion string // go version used in module
+// Retracted string // retraction information, if any (with -retracted or -u)
+// Error *ModuleError // error loading module
+// }
//
-// type ModuleError struct {
-// Err string // the error itself
-// }
+// type ModuleError struct {
+// Err string // the error itself
+// }
//
// The file GoMod refers to may be outside the module directory if the
// module is in the module cache or if the -modfile flag is used.
@@ -957,9 +953,9 @@
// information about the version and replacement if any.
// For example, 'go list -m all' might print:
//
-// my/main/module
-// golang.org/x/text v0.3.0 => /tmp/text
-// rsc.io/pdf v0.1.1
+// my/main/module
+// golang.org/x/text v0.3.0 => /tmp/text
+// rsc.io/pdf v0.1.1
//
// The Module struct has a String method that formats this
// line of output, so that the default format is equivalent
@@ -981,9 +977,9 @@
// If a version is retracted, the string "(retracted)" will follow it.
// For example, 'go list -m -u all' might print:
//
-// my/main/module
-// golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text
-// rsc.io/pdf v0.1.1 (retracted) [v0.1.2]
+// my/main/module
+// golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text
+// rsc.io/pdf v0.1.1 (retracted) [v0.1.2]
//
// (For tools, 'go list -m -u -json all' may be more convenient to parse.)
//
@@ -1026,8 +1022,7 @@
//
// For more about modules, see https://golang.org/ref/mod.
//
-//
-// Module maintenance
+// # Module maintenance
//
// Go mod provides access to operations on modules.
//
@@ -1038,26 +1033,26 @@
//
// Usage:
//
-// go mod [arguments]
+// go mod [arguments]
//
// The commands are:
//
-// download download modules to local cache
-// edit edit go.mod from tools or scripts
-// graph print module requirement graph
-// init initialize new module in current directory
-// tidy add missing and remove unused modules
-// vendor make vendored copy of dependencies
-// verify verify dependencies have expected content
-// why explain why packages or modules are needed
+// download download modules to local cache
+// edit edit go.mod from tools or scripts
+// graph print module requirement graph
+// init initialize new module in current directory
+// tidy add missing and remove unused modules
+// vendor make vendored copy of dependencies
+// verify verify dependencies have expected content
+// why explain why packages or modules are needed
//
// Use "go help mod <command>" for more information about a command.
//
-// Download modules to local cache
+// # Download modules to local cache
//
// Usage:
//
-// go mod download [-x] [-json] [modules]
+// go mod download [-x] [-json] [modules]
//
// Download downloads the named modules, which can be module patterns selecting
// dependencies of the main module or module queries of the form path@version.
@@ -1078,17 +1073,17 @@
// to standard output, describing each downloaded module (or failure),
// corresponding to this Go struct:
//
-// type Module struct {
-// Path string // module path
-// Version string // module version
-// Error string // error loading module
-// Info string // absolute path to cached .info file
-// GoMod string // absolute path to cached .mod file
-// Zip string // absolute path to cached .zip file
-// Dir string // absolute path to cached source root directory
-// Sum string // checksum for path, version (as in go.sum)
-// GoModSum string // checksum for go.mod (as in go.sum)
-// }
+// type Module struct {
+// Path string // module path
+// Version string // module version
+// Error string // error loading module
+// Info string // absolute path to cached .info file
+// GoMod string // absolute path to cached .mod file
+// Zip string // absolute path to cached .zip file
+// Dir string // absolute path to cached source root directory
+// Sum string // checksum for path, version (as in go.sum)
+// GoModSum string // checksum for go.mod (as in go.sum)
+// }
//
// The -x flag causes download to print the commands download executes.
//
@@ -1096,12 +1091,11 @@
//
// See https://golang.org/ref/mod#version-queries for more about version queries.
//
-//
-// Edit go.mod from tools or scripts
+// # Edit go.mod from tools or scripts
//
// Usage:
//
-// go mod edit [editing flags] [-fmt|-print|-json] [go.mod]
+// go mod edit [editing flags] [-fmt|-print|-json] [go.mod]
//
// Edit provides a command-line interface for editing go.mod,
// for use primarily by tools or scripts. It reads only go.mod;
@@ -1159,41 +1153,41 @@
// The -json flag prints the final go.mod file in JSON format instead of
// writing it back to go.mod. The JSON output corresponds to these Go types:
//
-// type Module struct {
-// Path string
-// Version string
-// }
+// type Module struct {
+// Path string
+// Version string
+// }
//
-// type GoMod struct {
-// Module ModPath
-// Go string
-// Require []Require
-// Exclude []Module
-// Replace []Replace
-// Retract []Retract
-// }
+// type GoMod struct {
+// Module ModPath
+// Go string
+// Require []Require
+// Exclude []Module
+// Replace []Replace
+// Retract []Retract
+// }
//
-// type ModPath struct {
-// Path string
-// Deprecated string
-// }
+// type ModPath struct {
+// Path string
+// Deprecated string
+// }
//
-// type Require struct {
-// Path string
-// Version string
-// Indirect bool
-// }
+// type Require struct {
+// Path string
+// Version string
+// Indirect bool
+// }
//
-// type Replace struct {
-// Old Module
-// New Module
-// }
+// type Replace struct {
+// Old Module
+// New Module
+// }
//
-// type Retract struct {
-// Low string
-// High string
-// Rationale string
-// }
+// type Retract struct {
+// Low string
+// High string
+// Rationale string
+// }
//
// Retract entries representing a single version (not an interval) will have
// the "Low" and "High" fields set to the same value.
@@ -1204,12 +1198,11 @@
//
// See https://golang.org/ref/mod#go-mod-edit for more about 'go mod edit'.
//
-//
-// Print module requirement graph
+// # Print module requirement graph
//
// Usage:
//
-// go mod graph [-go=version]
+// go mod graph [-go=version]
//
// Graph prints the module requirement graph (with replacements applied)
// in text form. Each line in the output has two space-separated fields: a module
@@ -1222,12 +1215,11 @@
//
// See https://golang.org/ref/mod#go-mod-graph for more about 'go mod graph'.
//
-//
-// Initialize new module in current directory
+// # Initialize new module in current directory
//
// Usage:
//
-// go mod init [module-path]
+// go mod init [module-path]
//
// Init initializes and writes a new go.mod file in the current directory, in
// effect creating a new module rooted at the current directory. The go.mod file
@@ -1243,12 +1235,11 @@
//
// See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'.
//
-//
-// Add missing and remove unused modules
+// # Add missing and remove unused modules
//
// Usage:
//
-// go mod tidy [-e] [-v] [-go=version] [-compat=version]
+// go mod tidy [-e] [-v] [-go=version] [-compat=version]
//
// Tidy makes sure go.mod matches the source code in the module.
// It adds any missing modules necessary to build the current module's
@@ -1278,12 +1269,11 @@
//
// See https://golang.org/ref/mod#go-mod-tidy for more about 'go mod tidy'.
//
-//
-// Make vendored copy of dependencies
+// # Make vendored copy of dependencies
//
// Usage:
//
-// go mod vendor [-e] [-v] [-o outdir]
+// go mod vendor [-e] [-v] [-o outdir]
//
// Vendor resets the main module's vendor directory to include all packages
// needed to build and test all the main module's packages.
@@ -1302,12 +1292,11 @@
//
// See https://golang.org/ref/mod#go-mod-vendor for more about 'go mod vendor'.
//
-//
-// Verify dependencies have expected content
+// # Verify dependencies have expected content
//
// Usage:
//
-// go mod verify
+// go mod verify
//
// Verify checks that the dependencies of the current module,
// which are stored in a local downloaded source cache, have not been
@@ -1318,12 +1307,11 @@
//
// See https://golang.org/ref/mod#go-mod-verify for more about 'go mod verify'.
//
-//
-// Explain why packages or modules are needed
+// # Explain why packages or modules are needed
//
// Usage:
//
-// go mod why [-m] [-vendor] packages...
+// go mod why [-m] [-vendor] packages...
//
// Why shows a shortest path in the import graph from the main module to
// each of the listed packages. If the -m flag is given, why treats the
@@ -1344,20 +1332,19 @@
//
// For example:
//
-// $ go mod why golang.org/x/text/language golang.org/x/text/encoding
-// # golang.org/x/text/language
-// rsc.io/quote
-// rsc.io/sampler
-// golang.org/x/text/language
+// $ go mod why golang.org/x/text/language golang.org/x/text/encoding
+// # golang.org/x/text/language
+// rsc.io/quote
+// rsc.io/sampler
+// golang.org/x/text/language
//
-// # golang.org/x/text/encoding
-// (main module does not need package golang.org/x/text/encoding)
-// $
+// # golang.org/x/text/encoding
+// (main module does not need package golang.org/x/text/encoding)
+// $
//
// See https://golang.org/ref/mod#go-mod-why for more about 'go mod why'.
//
-//
-// Workspace maintenance
+// # Workspace maintenance
//
// Work provides access to operations on workspaces.
//
@@ -1382,20 +1369,20 @@
// go.work files are line-oriented. Each line holds a single directive,
// made up of a keyword followed by arguments. For example:
//
-// go 1.18
+// go 1.18
//
-// use ../foo/bar
-// use ./baz
+// use ../foo/bar
+// use ./baz
//
-// replace example.com/foo v1.2.3 => example.com/bar v1.4.5
+// replace example.com/foo v1.2.3 => example.com/bar v1.4.5
//
// The leading keyword can be factored out of adjacent lines to create a block,
// like in Go imports.
//
-// use (
-// ../foo/bar
-// ./baz
-// )
+// use (
+// ../foo/bar
+// ./baz
+// )
//
// The use directive specifies a module to be included in the workspace's
// set of main modules. The argument to the use directive is the directory
@@ -1417,22 +1404,22 @@
//
// Usage:
//
-// go work [arguments]
+// go work [arguments]
//
// The commands are:
//
-// edit edit go.work from tools or scripts
-// init initialize workspace file
-// sync sync workspace build list to modules
-// use add modules to workspace file
+// edit edit go.work from tools or scripts
+// init initialize workspace file
+// sync sync workspace build list to modules
+// use add modules to workspace file
//
// Use "go help work <command>" for more information about a command.
//
-// Edit go.work from tools or scripts
+// # Edit go.work from tools or scripts
//
// Usage:
//
-// go work edit [editing flags] [go.work]
+// go work edit [editing flags] [go.work]
//
// Edit provides a command-line interface for editing go.work,
// for use primarily by tools or scripts. It only reads go.work;
@@ -1473,36 +1460,35 @@
// The -json flag prints the final go.work file in JSON format instead of
// writing it back to go.mod. The JSON output corresponds to these Go types:
//
-// type GoWork struct {
-// Go string
-// Use []Use
-// Replace []Replace
-// }
+// type GoWork struct {
+// Go string
+// Use []Use
+// Replace []Replace
+// }
//
-// type Use struct {
-// DiskPath string
-// ModulePath string
-// }
+// type Use struct {
+// DiskPath string
+// ModulePath string
+// }
//
-// type Replace struct {
-// Old Module
-// New Module
-// }
+// type Replace struct {
+// Old Module
+// New Module
+// }
//
-// type Module struct {
-// Path string
-// Version string
-// }
+// type Module struct {
+// Path string
+// Version string
+// }
//
// See the workspaces reference at https://go.dev/ref/mod#workspaces
// for more information.
//
-//
-// Initialize workspace file
+// # Initialize workspace file
//
// Usage:
//
-// go work init [moddirs]
+// go work init [moddirs]
//
// Init initializes and writes a new go.work file in the
// current directory, in effect creating a new workspace at the current
@@ -1518,12 +1504,11 @@
// See the workspaces reference at https://go.dev/ref/mod#workspaces
// for more information.
//
-//
-// Sync workspace build list to modules
+// # Sync workspace build list to modules
//
// Usage:
//
-// go work sync
+// go work sync
//
// Sync syncs the workspace's build list back to the
// workspace's modules
@@ -1544,12 +1529,11 @@
// See the workspaces reference at https://go.dev/ref/mod#workspaces
// for more information.
//
-//
-// Add modules to workspace file
+// # Add modules to workspace file
//
// Usage:
//
-// go work use [-r] moddirs
+// go work use [-r] moddirs
//
// Use provides a command-line interface for adding
// directories, optionally recursively, to a go.work file.
@@ -1566,12 +1550,11 @@
// See the workspaces reference at https://go.dev/ref/mod#workspaces
// for more information.
//
-//
-// Compile and run Go program
+// # Compile and run Go program
//
// Usage:
//
-// go run [build flags] [-exec xprog] package [arguments...]
+// go run [build flags] [-exec xprog] package [arguments...]
//
// Run compiles and runs the named main Go package.
// Typically the package is specified as a list of .go source files from a single
@@ -1591,7 +1574,9 @@
//
// By default, 'go run' runs the compiled binary directly: 'a.out arguments...'.
// If the -exec flag is given, 'go run' invokes the binary using xprog:
-// 'xprog a.out arguments...'.
+//
+// 'xprog a.out arguments...'.
+//
// If the -exec flag is not given, GOOS or GOARCH is different from the system
// default, and a program named go_$GOOS_$GOARCH_exec can be found
// on the current search path, 'go run' invokes the binary using that program,
@@ -1610,20 +1595,19 @@
//
// See also: go build.
//
-//
-// Test packages
+// # Test packages
//
// Usage:
//
-// go test [build/test flags] [packages] [build/test flags & test binary flags]
+// go test [build/test flags] [packages] [build/test flags & test binary flags]
//
// 'Go test' automates testing the packages named by the import paths.
// It prints a summary of the test results in the format:
//
-// ok archive/tar 0.011s
-// FAIL archive/zip 0.022s
-// ok compress/gzip 0.033s
-// ...
+// ok archive/tar 0.011s
+// FAIL archive/zip 0.022s
+// ok compress/gzip 0.033s
+// ...
//
// followed by detailed output for each failed package.
//
@@ -1702,33 +1686,33 @@
//
// In addition to the build flags, the flags handled by 'go test' itself are:
//
-// -args
-// Pass the remainder of the command line (everything after -args)
-// to the test binary, uninterpreted and unchanged.
-// Because this flag consumes the remainder of the command line,
-// the package list (if present) must appear before this flag.
+// -args
+// Pass the remainder of the command line (everything after -args)
+// to the test binary, uninterpreted and unchanged.
+// Because this flag consumes the remainder of the command line,
+// the package list (if present) must appear before this flag.
//
-// -c
-// Compile the test binary to pkg.test but do not run it
-// (where pkg is the last element of the package's import path).
-// The file name can be changed with the -o flag.
+// -c
+// Compile the test binary to pkg.test but do not run it
+// (where pkg is the last element of the package's import path).
+// The file name can be changed with the -o flag.
//
-// -exec xprog
-// Run the test binary using xprog. The behavior is the same as
-// in 'go run'. See 'go help run' for details.
+// -exec xprog
+// Run the test binary using xprog. The behavior is the same as
+// in 'go run'. See 'go help run' for details.
//
-// -i
-// Install packages that are dependencies of the test.
-// Do not run the test.
-// The -i flag is deprecated. Compiled packages are cached automatically.
+// -i
+// Install packages that are dependencies of the test.
+// Do not run the test.
+// The -i flag is deprecated. Compiled packages are cached automatically.
//
-// -json
-// Convert test output to JSON suitable for automated processing.
-// See 'go doc test2json' for the encoding details.
+// -json
+// Convert test output to JSON suitable for automated processing.
+// See 'go doc test2json' for the encoding details.
//
-// -o file
-// Compile the test binary to the named file.
-// The test still runs (unless -c or -i is specified).
+// -o file
+// Compile the test binary to the named file.
+// The test still runs (unless -c or -i is specified).
//
// The test binary also accepts flags that control execution of the test; these
// flags are also accessible by 'go test'. See 'go help testflag' for details.
@@ -1738,12 +1722,11 @@
//
// See also: go build, go vet.
//
-//
-// Run specified go tool
+// # Run specified go tool
//
// Usage:
//
-// go tool [-n] command [args...]
+// go tool [-n] command [args...]
//
// Tool runs the go tool command identified by the arguments.
// With no arguments it prints the list of known tools.
@@ -1753,12 +1736,11 @@
//
// For more about each tool command, see 'go doc cmd/'.
//
-//
-// Print Go version
+// # Print Go version
//
// Usage:
//
-// go version [-m] [-v] [file ...]
+// go version [-m] [-v] [file ...]
//
// Version prints the build information for Go executables.
//
@@ -1780,12 +1762,11 @@
//
// See also: go doc runtime/debug.BuildInfo.
//
-//
-// Report likely mistakes in packages
+// # Report likely mistakes in packages
//
// Usage:
//
-// go vet [-n] [-x] [-vettool prog] [build flags] [vet flags] [packages]
+// go vet [-n] [-x] [-vettool prog] [build flags] [vet flags] [packages]
//
// Vet runs the Go vet command on the packages named by the import paths.
//
@@ -1801,8 +1782,8 @@
// or additional checks.
// For example, the 'shadow' analyzer can be built and run using these commands:
//
-// go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow
-// go vet -vettool=$(which shadow)
+// go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow
+// go vet -vettool=$(which shadow)
//
// The build flags supported by go vet are those that control package resolution
// and execution, such as -n, -x, -v, -tags, and -toolexec.
@@ -1810,12 +1791,11 @@
//
// See also: go fmt, go fix.
//
-//
-// Build constraints
+// # Build constraints
//
// A build constraint, also known as a build tag, is a line comment that begins
//
-// //go:build
+// //go:build
//
// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but
@@ -1834,31 +1814,33 @@
// build when the "linux" and "386" constraints are satisfied, or when
// "darwin" is satisfied and "cgo" is not:
//
-// //go:build (linux && 386) || (darwin && !cgo)
+// //go:build (linux && 386) || (darwin && !cgo)
//
// It is an error for a file to have more than one //go:build line.
//
// During a particular build, the following words are satisfied:
//
-// - the target operating system, as spelled by runtime.GOOS, set with the
-// GOOS environment variable.
-// - the target architecture, as spelled by runtime.GOARCH, set with the
-// GOARCH environment variable.
-// - "unix", if GOOS is a Unix or Unix-like system.
-// - the compiler being used, either "gc" or "gccgo"
-// - "cgo", if the cgo command is supported (see CGO_ENABLED in
-// 'go help environment').
-// - a term for each Go major release, through the current version:
-// "go1.1" from Go version 1.1 onward, "go1.12" from Go 1.12, and so on.
-// - any additional tags given by the -tags flag (see 'go help build').
+// - the target operating system, as spelled by runtime.GOOS, set with the
+// GOOS environment variable.
+// - the target architecture, as spelled by runtime.GOARCH, set with the
+// GOARCH environment variable.
+// - "unix", if GOOS is a Unix or Unix-like system.
+// - the compiler being used, either "gc" or "gccgo"
+// - "cgo", if the cgo command is supported (see CGO_ENABLED in
+// 'go help environment').
+// - a term for each Go major release, through the current version:
+// "go1.1" from Go version 1.1 onward, "go1.12" from Go 1.12, and so on.
+// - any additional tags given by the -tags flag (see 'go help build').
//
// There are no separate build tags for beta or minor releases.
//
// If a file's name, after stripping the extension and a possible _test suffix,
// matches any of the following patterns:
-// *_GOOS
-// *_GOARCH
-// *_GOOS_GOARCH
+//
+// *_GOOS
+// *_GOARCH
+// *_GOOS_GOARCH
+//
// (example: source_windows_amd64.go) where GOOS and GOARCH represent
// any known operating system and architecture values respectively, then
// the file is considered to have an implicit build constraint requiring
@@ -1875,19 +1857,19 @@
//
// To keep a file from being considered for the build:
//
-// //go:build ignore
+// //go:build ignore
//
// (any other unsatisfied word will work as well, but "ignore" is conventional.)
//
// To build a file only when using cgo, and only on Linux and OS X:
//
-// //go:build cgo && (linux || darwin)
+// //go:build cgo && (linux || darwin)
//
// Such a file is usually paired with another file implementing the
// default functionality for other systems, which in this case would
// carry the constraint:
//
-// //go:build !(cgo && (linux || darwin))
+// //go:build !(cgo && (linux || darwin))
//
// Naming a file dns_windows.go will cause it to be included only when
// building the package for Windows; similarly, math_386.s will be included
@@ -1897,57 +1879,55 @@
// with a "// +build" prefix. The gofmt command will add an equivalent //go:build
// constraint when encountering the older syntax.
//
-//
-// Build modes
+// # Build modes
//
// The 'go build' and 'go install' commands take a -buildmode argument which
// indicates which kind of object file is to be built. Currently supported values
// are:
//
-// -buildmode=archive
-// Build the listed non-main packages into .a files. Packages named
-// main are ignored.
+// -buildmode=archive
+// Build the listed non-main packages into .a files. Packages named
+// main are ignored.
//
-// -buildmode=c-archive
-// Build the listed main package, plus all packages it imports,
-// into a C archive file. The only callable symbols will be those
-// functions exported using a cgo //export comment. Requires
-// exactly one main package to be listed.
+// -buildmode=c-archive
+// Build the listed main package, plus all packages it imports,
+// into a C archive file. The only callable symbols will be those
+// functions exported using a cgo //export comment. Requires
+// exactly one main package to be listed.
//
-// -buildmode=c-shared
-// Build the listed main package, plus all packages it imports,
-// into a C shared library. The only callable symbols will
-// be those functions exported using a cgo //export comment.
-// Requires exactly one main package to be listed.
+// -buildmode=c-shared
+// Build the listed main package, plus all packages it imports,
+// into a C shared library. The only callable symbols will
+// be those functions exported using a cgo //export comment.
+// Requires exactly one main package to be listed.
//
-// -buildmode=default
-// Listed main packages are built into executables and listed
-// non-main packages are built into .a files (the default
-// behavior).
+// -buildmode=default
+// Listed main packages are built into executables and listed
+// non-main packages are built into .a files (the default
+// behavior).
//
-// -buildmode=shared
-// Combine all the listed non-main packages into a single shared
-// library that will be used when building with the -linkshared
-// option. Packages named main are ignored.
+// -buildmode=shared
+// Combine all the listed non-main packages into a single shared
+// library that will be used when building with the -linkshared
+// option. Packages named main are ignored.
//
-// -buildmode=exe
-// Build the listed main packages and everything they import into
-// executables. Packages not named main are ignored.
+// -buildmode=exe
+// Build the listed main packages and everything they import into
+// executables. Packages not named main are ignored.
//
-// -buildmode=pie
-// Build the listed main packages and everything they import into
-// position independent executables (PIE). Packages not named
-// main are ignored.
+// -buildmode=pie
+// Build the listed main packages and everything they import into
+// position independent executables (PIE). Packages not named
+// main are ignored.
//
-// -buildmode=plugin
-// Build the listed main packages, plus all packages that they
-// import, into a Go plugin. Packages not named main are ignored.
+// -buildmode=plugin
+// Build the listed main packages, plus all packages that they
+// import, into a Go plugin. Packages not named main are ignored.
//
// On AIX, when linking a C program that uses a Go archive built with
// -buildmode=c-archive, you must pass -Wl,-bnoobjreorder to the C compiler.
//
-//
-// Calling between Go and C
+// # Calling between Go and C
//
// There are two different ways to call between Go and C/C++ code.
//
@@ -1965,8 +1945,7 @@
// compiler. The CC or CXX environment variables may be set to determine
// the C or C++ compiler, respectively, to use.
//
-//
-// Build and test caching
+// # Build and test caching
//
// The go command caches build outputs for reuse in future builds.
// The default location for cache data is a subdirectory named go-build
@@ -2011,8 +1990,7 @@
// GODEBUG=gocachetest=1 causes the go command to print details of its
// decisions about whether to reuse a cached test result.
//
-//
-// Environment variables
+// # Environment variables
//
// The go command and the tools it invokes consult environment variables
// for configuration. If an environment variable is unset, the go command
@@ -2028,217 +2006,216 @@
//
// General-purpose environment variables:
//
-// GO111MODULE
-// Controls whether the go command runs in module-aware mode or GOPATH mode.
-// May be "off", "on", or "auto".
-// See https://golang.org/ref/mod#mod-commands.
-// GCCGO
-// The gccgo command to run for 'go build -compiler=gccgo'.
-// GOARCH
-// The architecture, or processor, for which to compile code.
-// Examples are amd64, 386, arm, ppc64.
-// GOBIN
-// The directory where 'go install' will install a command.
-// GOCACHE
-// The directory where the go command will store cached
-// information for reuse in future builds.
-// GOMODCACHE
-// The directory where the go command will store downloaded modules.
-// GODEBUG
-// Enable various debugging facilities. See 'go doc runtime'
-// for details.
-// GOENV
-// The location of the Go environment configuration file.
-// Cannot be set using 'go env -w'.
-// Setting GOENV=off in the environment disables the use of the
-// default configuration file.
-// GOFLAGS
-// A space-separated list of -flag=value settings to apply
-// to go commands by default, when the given flag is known by
-// the current command. Each entry must be a standalone flag.
-// Because the entries are space-separated, flag values must
-// not contain spaces. Flags listed on the command line
-// are applied after this list and therefore override it.
-// GOINSECURE
-// Comma-separated list of glob patterns (in the syntax of Go's path.Match)
-// of module path prefixes that should always be fetched in an insecure
-// manner. Only applies to dependencies that are being fetched directly.
-// GOINSECURE does not disable checksum database validation. GOPRIVATE or
-// GONOSUMDB may be used to achieve that.
-// GOOS
-// The operating system for which to compile code.
-// Examples are linux, darwin, windows, netbsd.
-// GOPATH
-// For more details see: 'go help gopath'.
-// GOPROXY
-// URL of Go module proxy. See https://golang.org/ref/mod#environment-variables
-// and https://golang.org/ref/mod#module-proxy for details.
-// GOPRIVATE, GONOPROXY, GONOSUMDB
-// Comma-separated list of glob patterns (in the syntax of Go's path.Match)
-// of module path prefixes that should always be fetched directly
-// or that should not be compared against the checksum database.
-// See https://golang.org/ref/mod#private-modules.
-// GOROOT
-// The root of the go tree.
-// GOSUMDB
-// The name of checksum database to use and optionally its public key and
-// URL. See https://golang.org/ref/mod#authenticating.
-// GOTMPDIR
-// The directory where the go command will write
-// temporary source files, packages, and binaries.
-// GOVCS
-// Lists version control commands that may be used with matching servers.
-// See 'go help vcs'.
-// GOWORK
-// In module aware mode, use the given go.work file as a workspace file.
-// By default or when GOWORK is "auto", the go command searches for a
-// file named go.work in the current directory and then containing directories
-// until one is found. If a valid go.work file is found, the modules
-// specified will collectively be used as the main modules. If GOWORK
-// is "off", or a go.work file is not found in "auto" mode, workspace
-// mode is disabled.
+// GO111MODULE
+// Controls whether the go command runs in module-aware mode or GOPATH mode.
+// May be "off", "on", or "auto".
+// See https://golang.org/ref/mod#mod-commands.
+// GCCGO
+// The gccgo command to run for 'go build -compiler=gccgo'.
+// GOARCH
+// The architecture, or processor, for which to compile code.
+// Examples are amd64, 386, arm, ppc64.
+// GOBIN
+// The directory where 'go install' will install a command.
+// GOCACHE
+// The directory where the go command will store cached
+// information for reuse in future builds.
+// GOMODCACHE
+// The directory where the go command will store downloaded modules.
+// GODEBUG
+// Enable various debugging facilities. See 'go doc runtime'
+// for details.
+// GOENV
+// The location of the Go environment configuration file.
+// Cannot be set using 'go env -w'.
+// Setting GOENV=off in the environment disables the use of the
+// default configuration file.
+// GOFLAGS
+// A space-separated list of -flag=value settings to apply
+// to go commands by default, when the given flag is known by
+// the current command. Each entry must be a standalone flag.
+// Because the entries are space-separated, flag values must
+// not contain spaces. Flags listed on the command line
+// are applied after this list and therefore override it.
+// GOINSECURE
+// Comma-separated list of glob patterns (in the syntax of Go's path.Match)
+// of module path prefixes that should always be fetched in an insecure
+// manner. Only applies to dependencies that are being fetched directly.
+// GOINSECURE does not disable checksum database validation. GOPRIVATE or
+// GONOSUMDB may be used to achieve that.
+// GOOS
+// The operating system for which to compile code.
+// Examples are linux, darwin, windows, netbsd.
+// GOPATH
+// For more details see: 'go help gopath'.
+// GOPROXY
+// URL of Go module proxy. See https://golang.org/ref/mod#environment-variables
+// and https://golang.org/ref/mod#module-proxy for details.
+// GOPRIVATE, GONOPROXY, GONOSUMDB
+// Comma-separated list of glob patterns (in the syntax of Go's path.Match)
+// of module path prefixes that should always be fetched directly
+// or that should not be compared against the checksum database.
+// See https://golang.org/ref/mod#private-modules.
+// GOROOT
+// The root of the go tree.
+// GOSUMDB
+// The name of checksum database to use and optionally its public key and
+// URL. See https://golang.org/ref/mod#authenticating.
+// GOTMPDIR
+// The directory where the go command will write
+// temporary source files, packages, and binaries.
+// GOVCS
+// Lists version control commands that may be used with matching servers.
+// See 'go help vcs'.
+// GOWORK
+// In module aware mode, use the given go.work file as a workspace file.
+// By default or when GOWORK is "auto", the go command searches for a
+// file named go.work in the current directory and then containing directories
+// until one is found. If a valid go.work file is found, the modules
+// specified will collectively be used as the main modules. If GOWORK
+// is "off", or a go.work file is not found in "auto" mode, workspace
+// mode is disabled.
//
// Environment variables for use with cgo:
//
-// AR
-// The command to use to manipulate library archives when
-// building with the gccgo compiler.
-// The default is 'ar'.
-// CC
-// The command to use to compile C code.
-// CGO_ENABLED
-// Whether the cgo command is supported. Either 0 or 1.
-// CGO_CFLAGS
-// Flags that cgo will pass to the compiler when compiling
-// C code.
-// CGO_CFLAGS_ALLOW
-// A regular expression specifying additional flags to allow
-// to appear in #cgo CFLAGS source code directives.
-// Does not apply to the CGO_CFLAGS environment variable.
-// CGO_CFLAGS_DISALLOW
-// A regular expression specifying flags that must be disallowed
-// from appearing in #cgo CFLAGS source code directives.
-// Does not apply to the CGO_CFLAGS environment variable.
-// CGO_CPPFLAGS, CGO_CPPFLAGS_ALLOW, CGO_CPPFLAGS_DISALLOW
-// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
-// but for the C preprocessor.
-// CGO_CXXFLAGS, CGO_CXXFLAGS_ALLOW, CGO_CXXFLAGS_DISALLOW
-// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
-// but for the C++ compiler.
-// CGO_FFLAGS, CGO_FFLAGS_ALLOW, CGO_FFLAGS_DISALLOW
-// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
-// but for the Fortran compiler.
-// CGO_LDFLAGS, CGO_LDFLAGS_ALLOW, CGO_LDFLAGS_DISALLOW
-// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
-// but for the linker.
-// CXX
-// The command to use to compile C++ code.
-// FC
-// The command to use to compile Fortran code.
-// PKG_CONFIG
-// Path to pkg-config tool.
+// AR
+// The command to use to manipulate library archives when
+// building with the gccgo compiler.
+// The default is 'ar'.
+// CC
+// The command to use to compile C code.
+// CGO_ENABLED
+// Whether the cgo command is supported. Either 0 or 1.
+// CGO_CFLAGS
+// Flags that cgo will pass to the compiler when compiling
+// C code.
+// CGO_CFLAGS_ALLOW
+// A regular expression specifying additional flags to allow
+// to appear in #cgo CFLAGS source code directives.
+// Does not apply to the CGO_CFLAGS environment variable.
+// CGO_CFLAGS_DISALLOW
+// A regular expression specifying flags that must be disallowed
+// from appearing in #cgo CFLAGS source code directives.
+// Does not apply to the CGO_CFLAGS environment variable.
+// CGO_CPPFLAGS, CGO_CPPFLAGS_ALLOW, CGO_CPPFLAGS_DISALLOW
+// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+// but for the C preprocessor.
+// CGO_CXXFLAGS, CGO_CXXFLAGS_ALLOW, CGO_CXXFLAGS_DISALLOW
+// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+// but for the C++ compiler.
+// CGO_FFLAGS, CGO_FFLAGS_ALLOW, CGO_FFLAGS_DISALLOW
+// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+// but for the Fortran compiler.
+// CGO_LDFLAGS, CGO_LDFLAGS_ALLOW, CGO_LDFLAGS_DISALLOW
+// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+// but for the linker.
+// CXX
+// The command to use to compile C++ code.
+// FC
+// The command to use to compile Fortran code.
+// PKG_CONFIG
+// Path to pkg-config tool.
//
// Architecture-specific environment variables:
//
-// GOARM
-// For GOARCH=arm, the ARM architecture for which to compile.
-// Valid values are 5, 6, 7.
-// GO386
-// For GOARCH=386, how to implement floating point instructions.
-// Valid values are sse2 (default), softfloat.
-// GOAMD64
-// For GOARCH=amd64, the microarchitecture level for which to compile.
-// Valid values are v1 (default), v2, v3, v4.
-// See https://golang.org/wiki/MinimumRequirements#amd64
-// GOMIPS
-// For GOARCH=mips{,le}, whether to use floating point instructions.
-// Valid values are hardfloat (default), softfloat.
-// GOMIPS64
-// For GOARCH=mips64{,le}, whether to use floating point instructions.
-// Valid values are hardfloat (default), softfloat.
-// GOPPC64
-// For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture).
-// Valid values are power8 (default), power9.
-// GOWASM
-// For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use.
-// Valid values are satconv, signext.
+// GOARM
+// For GOARCH=arm, the ARM architecture for which to compile.
+// Valid values are 5, 6, 7.
+// GO386
+// For GOARCH=386, how to implement floating point instructions.
+// Valid values are sse2 (default), softfloat.
+// GOAMD64
+// For GOARCH=amd64, the microarchitecture level for which to compile.
+// Valid values are v1 (default), v2, v3, v4.
+// See https://golang.org/wiki/MinimumRequirements#amd64
+// GOMIPS
+// For GOARCH=mips{,le}, whether to use floating point instructions.
+// Valid values are hardfloat (default), softfloat.
+// GOMIPS64
+// For GOARCH=mips64{,le}, whether to use floating point instructions.
+// Valid values are hardfloat (default), softfloat.
+// GOPPC64
+// For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture).
+// Valid values are power8 (default), power9.
+// GOWASM
+// For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use.
+// Valid values are satconv, signext.
//
// Special-purpose environment variables:
//
-// GCCGOTOOLDIR
-// If set, where to find gccgo tools, such as cgo.
-// The default is based on how gccgo was configured.
-// GOEXPERIMENT
-// Comma-separated list of toolchain experiments to enable or disable.
-// The list of available experiments may change arbitrarily over time.
-// See src/internal/goexperiment/flags.go for currently valid values.
-// Warning: This variable is provided for the development and testing
-// of the Go toolchain itself. Use beyond that purpose is unsupported.
-// GOROOT_FINAL
-// The root of the installed Go tree, when it is
-// installed in a location other than where it is built.
-// File names in stack traces are rewritten from GOROOT to
-// GOROOT_FINAL.
-// GO_EXTLINK_ENABLED
-// Whether the linker should use external linking mode
-// when using -linkmode=auto with code that uses cgo.
-// Set to 0 to disable external linking mode, 1 to enable it.
-// GIT_ALLOW_PROTOCOL
-// Defined by Git. A colon-separated list of schemes that are allowed
-// to be used with git fetch/clone. If set, any scheme not explicitly
-// mentioned will be considered insecure by 'go get'.
-// Because the variable is defined by Git, the default value cannot
-// be set using 'go env -w'.
+// GCCGOTOOLDIR
+// If set, where to find gccgo tools, such as cgo.
+// The default is based on how gccgo was configured.
+// GOEXPERIMENT
+// Comma-separated list of toolchain experiments to enable or disable.
+// The list of available experiments may change arbitrarily over time.
+// See src/internal/goexperiment/flags.go for currently valid values.
+// Warning: This variable is provided for the development and testing
+// of the Go toolchain itself. Use beyond that purpose is unsupported.
+// GOROOT_FINAL
+// The root of the installed Go tree, when it is
+// installed in a location other than where it is built.
+// File names in stack traces are rewritten from GOROOT to
+// GOROOT_FINAL.
+// GO_EXTLINK_ENABLED
+// Whether the linker should use external linking mode
+// when using -linkmode=auto with code that uses cgo.
+// Set to 0 to disable external linking mode, 1 to enable it.
+// GIT_ALLOW_PROTOCOL
+// Defined by Git. A colon-separated list of schemes that are allowed
+// to be used with git fetch/clone. If set, any scheme not explicitly
+// mentioned will be considered insecure by 'go get'.
+// Because the variable is defined by Git, the default value cannot
+// be set using 'go env -w'.
//
// Additional information available from 'go env' but not read from the environment:
//
-// GOEXE
-// The executable file name suffix (".exe" on Windows, "" on other systems).
-// GOGCCFLAGS
-// A space-separated list of arguments supplied to the CC command.
-// GOHOSTARCH
-// The architecture (GOARCH) of the Go toolchain binaries.
-// GOHOSTOS
-// The operating system (GOOS) of the Go toolchain binaries.
-// GOMOD
-// The absolute path to the go.mod of the main module.
-// If module-aware mode is enabled, but there is no go.mod, GOMOD will be
-// os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows).
-// If module-aware mode is disabled, GOMOD will be the empty string.
-// GOTOOLDIR
-// The directory where the go tools (compile, cover, doc, etc...) are installed.
-// GOVERSION
-// The version of the installed Go tree, as reported by runtime.Version.
+// GOEXE
+// The executable file name suffix (".exe" on Windows, "" on other systems).
+// GOGCCFLAGS
+// A space-separated list of arguments supplied to the CC command.
+// GOHOSTARCH
+// The architecture (GOARCH) of the Go toolchain binaries.
+// GOHOSTOS
+// The operating system (GOOS) of the Go toolchain binaries.
+// GOMOD
+// The absolute path to the go.mod of the main module.
+// If module-aware mode is enabled, but there is no go.mod, GOMOD will be
+// os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows).
+// If module-aware mode is disabled, GOMOD will be the empty string.
+// GOTOOLDIR
+// The directory where the go tools (compile, cover, doc, etc...) are installed.
+// GOVERSION
+// The version of the installed Go tree, as reported by runtime.Version.
//
-//
-// File types
+// # File types
//
// The go command examines the contents of a restricted set of files
// in each directory. It identifies which files to examine based on
// the extension of the file name. These extensions are:
//
-// .go
-// Go source files.
-// .c, .h
-// C source files.
-// If the package uses cgo or SWIG, these will be compiled with the
-// OS-native compiler (typically gcc); otherwise they will
-// trigger an error.
-// .cc, .cpp, .cxx, .hh, .hpp, .hxx
-// C++ source files. Only useful with cgo or SWIG, and always
-// compiled with the OS-native compiler.
-// .m
-// Objective-C source files. Only useful with cgo, and always
-// compiled with the OS-native compiler.
-// .s, .S, .sx
-// Assembler source files.
-// If the package uses cgo or SWIG, these will be assembled with the
-// OS-native assembler (typically gcc (sic)); otherwise they
-// will be assembled with the Go assembler.
-// .swig, .swigcxx
-// SWIG definition files.
-// .syso
-// System object files.
+// .go
+// Go source files.
+// .c, .h
+// C source files.
+// If the package uses cgo or SWIG, these will be compiled with the
+// OS-native compiler (typically gcc); otherwise they will
+// trigger an error.
+// .cc, .cpp, .cxx, .hh, .hpp, .hxx
+// C++ source files. Only useful with cgo or SWIG, and always
+// compiled with the OS-native compiler.
+// .m
+// Objective-C source files. Only useful with cgo, and always
+// compiled with the OS-native compiler.
+// .s, .S, .sx
+// Assembler source files.
+// If the package uses cgo or SWIG, these will be assembled with the
+// OS-native assembler (typically gcc (sic)); otherwise they
+// will be assembled with the Go assembler.
+// .swig, .swigcxx
+// SWIG definition files.
+// .syso
+// System object files.
//
// Files of each of these types except .syso may contain build
// constraints, but the go command stops scanning for build constraints
@@ -2246,8 +2223,7 @@
// line comment. See the go/build package documentation for
// more details.
//
-//
-// The go.mod file
+// # The go.mod file
//
// A module version is defined by a tree of source files, with a go.mod
// file in its root. When the go command is run, it looks in the current
@@ -2272,8 +2248,7 @@
// use 'go mod edit'. See 'go help mod edit' or
// https://golang.org/ref/mod#go-mod-edit.
//
-//
-// GOPATH environment variable
+// # GOPATH environment variable
//
// The Go path is used to resolve import statements.
// It is implemented by and documented in the go/build package.
@@ -2317,21 +2292,21 @@
//
// Here's an example directory layout:
//
-// GOPATH=/home/user/go
+// GOPATH=/home/user/go
//
-// /home/user/go/
-// src/
-// foo/
-// bar/ (go code in package bar)
-// x.go
-// quux/ (go code in package main)
-// y.go
-// bin/
-// quux (installed command)
-// pkg/
-// linux_amd64/
-// foo/
-// bar.a (installed package object)
+// /home/user/go/
+// src/
+// foo/
+// bar/ (go code in package bar)
+// x.go
+// quux/ (go code in package main)
+// y.go
+// bin/
+// quux (installed command)
+// pkg/
+// linux_amd64/
+// foo/
+// bar.a (installed package object)
//
// Go searches each directory listed in GOPATH to find source code,
// but new packages are always downloaded into the first directory
@@ -2339,33 +2314,32 @@
//
// See https://golang.org/doc/code.html for an example.
//
-// GOPATH and Modules
+// # GOPATH and Modules
//
// When using modules, GOPATH is no longer used for resolving imports.
// However, it is still used to store downloaded source code (in GOPATH/pkg/mod)
// and compiled commands (in GOPATH/bin).
//
-// Internal Directories
+// # Internal Directories
//
// Code in or below a directory named "internal" is importable only
// by code in the directory tree rooted at the parent of "internal".
// Here's an extended version of the directory layout above:
//
-// /home/user/go/
-// src/
-// crash/
-// bang/ (go code in package bang)
-// b.go
-// foo/ (go code in package foo)
-// f.go
-// bar/ (go code in package bar)
-// x.go
-// internal/
-// baz/ (go code in package baz)
-// z.go
-// quux/ (go code in package main)
-// y.go
-//
+// /home/user/go/
+// src/
+// crash/
+// bang/ (go code in package bang)
+// b.go
+// foo/ (go code in package foo)
+// f.go
+// bar/ (go code in package bar)
+// x.go
+// internal/
+// baz/ (go code in package baz)
+// z.go
+// quux/ (go code in package main)
+// y.go
//
// The code in z.go is imported as "foo/internal/baz", but that
// import statement can only appear in source files in the subtree
@@ -2375,7 +2349,7 @@
//
// See https://golang.org/s/go14internal for details.
//
-// Vendor Directories
+// # Vendor Directories
//
// Go 1.6 includes support for using local copies of external dependencies
// to satisfy imports of those dependencies, often referred to as vendoring.
@@ -2389,23 +2363,23 @@
// but with the "internal" directory renamed to "vendor"
// and a new foo/vendor/crash/bang directory added:
//
-// /home/user/go/
-// src/
-// crash/
-// bang/ (go code in package bang)
-// b.go
-// foo/ (go code in package foo)
-// f.go
-// bar/ (go code in package bar)
-// x.go
-// vendor/
-// crash/
-// bang/ (go code in package bang)
-// b.go
-// baz/ (go code in package baz)
-// z.go
-// quux/ (go code in package main)
-// y.go
+// /home/user/go/
+// src/
+// crash/
+// bang/ (go code in package bang)
+// b.go
+// foo/ (go code in package foo)
+// f.go
+// bar/ (go code in package bar)
+// x.go
+// vendor/
+// crash/
+// bang/ (go code in package bang)
+// b.go
+// baz/ (go code in package baz)
+// z.go
+// quux/ (go code in package main)
+// y.go
//
// The same visibility rules apply as for internal, but the code
// in z.go is imported as "baz", not as "foo/vendor/baz".
@@ -2427,8 +2401,7 @@
//
// See https://golang.org/s/go15vendor for details.
//
-//
-// Legacy GOPATH go get
+// # Legacy GOPATH go get
//
// The 'go get' command changes behavior depending on whether the
// go command is running in module-aware mode or legacy GOPATH mode.
@@ -2490,8 +2463,7 @@
//
// See also: go build, go install, go clean.
//
-//
-// Module proxy protocol
+// # Module proxy protocol
//
// A Go module proxy is any web server that can respond to GET requests for
// URLs of a specified form. The requests have no query parameters, so even
@@ -2501,15 +2473,14 @@
// For details on the GOPROXY protocol, see
// https://golang.org/ref/mod#goproxy-protocol.
//
-//
-// Import path syntax
+// # Import path syntax
//
// An import path (see 'go help packages') denotes a package stored in the local
// file system. In general, an import path denotes either a standard package (such
// as "unicode/utf8") or a package found in one of the work spaces (For more
// details see: 'go help gopath').
//
-// Relative import paths
+// # Relative import paths
//
// An import path beginning with ./ or ../ is called a relative path.
// The toolchain supports relative import paths as a shortcut in two ways.
@@ -2533,7 +2504,7 @@
// To avoid ambiguity, Go programs cannot use relative import paths
// within a work space.
//
-// Remote import paths
+// # Remote import paths
//
// Certain import paths also
// describe how to obtain the source code for the package using
@@ -2541,29 +2512,29 @@
//
// A few common code hosting sites have special syntax:
//
-// Bitbucket (Git, Mercurial)
+// Bitbucket (Git, Mercurial)
//
-// import "bitbucket.org/user/project"
-// import "bitbucket.org/user/project/sub/directory"
+// import "bitbucket.org/user/project"
+// import "bitbucket.org/user/project/sub/directory"
//
-// GitHub (Git)
+// GitHub (Git)
//
-// import "github.com/user/project"
-// import "github.com/user/project/sub/directory"
+// import "github.com/user/project"
+// import "github.com/user/project/sub/directory"
//
-// Launchpad (Bazaar)
+// Launchpad (Bazaar)
//
-// import "launchpad.net/project"
-// import "launchpad.net/project/series"
-// import "launchpad.net/project/series/sub/directory"
+// import "launchpad.net/project"
+// import "launchpad.net/project/series"
+// import "launchpad.net/project/series/sub/directory"
//
-// import "launchpad.net/~user/project/branch"
-// import "launchpad.net/~user/project/branch/sub/directory"
+// import "launchpad.net/~user/project/branch"
+// import "launchpad.net/~user/project/branch/sub/directory"
//
-// IBM DevOps Services (Git)
+// IBM DevOps Services (Git)
//
-// import "hub.jazz.net/git/user/project"
-// import "hub.jazz.net/git/user/project/sub/directory"
+// import "hub.jazz.net/git/user/project"
+// import "hub.jazz.net/git/user/project/sub/directory"
//
// For code hosted on other servers, import paths may either be qualified
// with the version control type, or the go tool can dynamically fetch
@@ -2572,26 +2543,26 @@
//
// To declare the code location, an import path of the form
//
-// repository.vcs/path
+// repository.vcs/path
//
// specifies the given repository, with or without the .vcs suffix,
// using the named version control system, and then the path inside
// that repository. The supported version control systems are:
//
-// Bazaar .bzr
-// Fossil .fossil
-// Git .git
-// Mercurial .hg
-// Subversion .svn
+// Bazaar .bzr
+// Fossil .fossil
+// Git .git
+// Mercurial .hg
+// Subversion .svn
//
// For example,
//
-// import "example.org/user/foo.hg"
+// import "example.org/user/foo.hg"
//
// denotes the root directory of the Mercurial repository at
// example.org/user/foo or foo.hg, and
//
-// import "example.org/repo.git/foo/bar"
+// import "example.org/repo.git/foo/bar"
//
// denotes the foo/bar directory of the Git repository at
// example.org/repo or repo.git.
@@ -2612,7 +2583,7 @@
//
// The meta tag has the form:
//
-// 	<meta name="go-import" content="import-prefix vcs repo-root">
+//	<meta name="go-import" content="import-prefix vcs repo-root">
//
// The import-prefix is the import path corresponding to the repository
// root. It must be a prefix or an exact match of the package being
@@ -2630,16 +2601,16 @@
//
// For example,
//
-// import "example.org/pkg/foo"
+// import "example.org/pkg/foo"
//
// will result in the following requests:
//
-// https://example.org/pkg/foo?go-get=1 (preferred)
-// http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE)
+// https://example.org/pkg/foo?go-get=1 (preferred)
+// http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE)
//
// If that page contains the meta tag
//
-// 	<meta name="go-import" content="example.org git https://code.org/r/p/exproj">
+//	<meta name="go-import" content="example.org git https://code.org/r/p/exproj">
//
// the go tool will verify that https://example.org/?go-get=1 contains the
// same meta tag and then git clone https://code.org/r/p/exproj into
@@ -2656,14 +2627,14 @@
// recognized and is preferred over those listing version control systems.
// That variant uses "mod" as the vcs in the content value, as in:
//
-// 	<meta name="go-import" content="example.org mod https://code.org/moduleproxy">
+//	<meta name="go-import" content="example.org mod https://code.org/moduleproxy">
//
// This tag means to fetch modules with paths beginning with example.org
// from the module proxy available at the URL https://code.org/moduleproxy.
// See https://golang.org/ref/mod#goproxy-protocol for details about the
// proxy protocol.
//
-// Import path checking
+// # Import path checking
//
// When the custom import path feature described above redirects to a
// known code hosting site, each of the resulting packages has two possible
@@ -2672,8 +2643,8 @@
// A package statement is said to have an "import comment" if it is immediately
// followed (before the next newline) by a comment of one of these two forms:
//
-// package math // import "path"
-// package math /* import "path" */
+// package math // import "path"
+// package math /* import "path" */
//
// The go command will refuse to install a package with an import comment
// unless it is being referred to by that import path. In this way, import comments
@@ -2689,8 +2660,7 @@
//
// See https://golang.org/s/go14customimport for details.
//
-//
-// Modules, module versions, and more
+// # Modules, module versions, and more
//
// Modules are how Go manages dependencies.
//
@@ -2714,8 +2684,7 @@
// GOPRIVATE, and other environment variables. See 'go help environment'
// and https://golang.org/ref/mod#private-module-privacy for more information.
//
-//
-// Module authentication using go.sum
+// # Module authentication using go.sum
//
// When the go command downloads a module zip file or go.mod file into the
// module cache, it computes a cryptographic hash and compares it with a known
@@ -2726,12 +2695,11 @@
//
// For details, see https://golang.org/ref/mod#authenticating.
//
-//
-// Package lists and patterns
+// # Package lists and patterns
//
// Many commands apply to a set of packages:
//
-// go action [packages]
+// go action [packages]
//
// Usually, [packages] is a list of import paths.
//
@@ -2810,8 +2778,7 @@
// Directory and file names that begin with "." or "_" are ignored
// by the go tool, as are directories named "testdata".
//
-//
-// Configuration for downloading non-public code
+// # Configuration for downloading non-public code
//
// The go command defaults to downloading modules from the public Go module
// mirror at proxy.golang.org. It also defaults to validating downloaded modules,
@@ -2824,7 +2791,7 @@
// glob patterns (in the syntax of Go's path.Match) of module path prefixes.
// For example,
//
-// GOPRIVATE=*.corp.example.com,rsc.io/private
+// GOPRIVATE=*.corp.example.com,rsc.io/private
//
// causes the go command to treat as private any module with a path prefix
// matching either pattern, including git.corp.example.com/xyzzy, rsc.io/private,
@@ -2838,9 +2805,9 @@
// For example, if a company ran a module proxy serving private modules,
// users would configure go using:
//
-// GOPRIVATE=*.corp.example.com
-// GOPROXY=proxy.example.com
-// GONOPROXY=none
+// GOPRIVATE=*.corp.example.com
+// GOPROXY=proxy.example.com
+// GONOPROXY=none
//
// The GOPRIVATE variable is also used to define the "public" and "private"
// patterns for the GOVCS variable; see 'go help vcs'. For that usage,
@@ -2852,8 +2819,7 @@
//
// For more details, see https://golang.org/ref/mod#private-modules.
//
-//
-// Testing flags
+// # Testing flags
//
// The 'go test' command takes both flags that apply to 'go test' itself
// and flags that apply to the resulting test binary.
@@ -2866,204 +2832,204 @@
// The following flags are recognized by the 'go test' command and
// control the execution of any test:
//
-// -bench regexp
-// Run only those benchmarks matching a regular expression.
-// By default, no benchmarks are run.
-// To run all benchmarks, use '-bench .' or '-bench=.'.
-// The regular expression is split by unbracketed slash (/)
-// characters into a sequence of regular expressions, and each
-// part of a benchmark's identifier must match the corresponding
-// element in the sequence, if any. Possible parents of matches
-// are run with b.N=1 to identify sub-benchmarks. For example,
-// given -bench=X/Y, top-level benchmarks matching X are run
-// with b.N=1 to find any sub-benchmarks matching Y, which are
-// then run in full.
+// -bench regexp
+// Run only those benchmarks matching a regular expression.
+// By default, no benchmarks are run.
+// To run all benchmarks, use '-bench .' or '-bench=.'.
+// The regular expression is split by unbracketed slash (/)
+// characters into a sequence of regular expressions, and each
+// part of a benchmark's identifier must match the corresponding
+// element in the sequence, if any. Possible parents of matches
+// are run with b.N=1 to identify sub-benchmarks. For example,
+// given -bench=X/Y, top-level benchmarks matching X are run
+// with b.N=1 to find any sub-benchmarks matching Y, which are
+// then run in full.
//
-// -benchtime t
-// Run enough iterations of each benchmark to take t, specified
-// as a time.Duration (for example, -benchtime 1h30s).
-// The default is 1 second (1s).
-// The special syntax Nx means to run the benchmark N times
-// (for example, -benchtime 100x).
+// -benchtime t
+// Run enough iterations of each benchmark to take t, specified
+// as a time.Duration (for example, -benchtime 1h30s).
+// The default is 1 second (1s).
+// The special syntax Nx means to run the benchmark N times
+// (for example, -benchtime 100x).
//
-// -count n
-// Run each test, benchmark, and fuzz seed n times (default 1).
-// If -cpu is set, run n times for each GOMAXPROCS value.
-// Examples are always run once. -count does not apply to
-// fuzz tests matched by -fuzz.
+// -count n
+// Run each test, benchmark, and fuzz seed n times (default 1).
+// If -cpu is set, run n times for each GOMAXPROCS value.
+// Examples are always run once. -count does not apply to
+// fuzz tests matched by -fuzz.
//
-// -cover
-// Enable coverage analysis.
-// Note that because coverage works by annotating the source
-// code before compilation, compilation and test failures with
-// coverage enabled may report line numbers that don't correspond
-// to the original sources.
+// -cover
+// Enable coverage analysis.
+// Note that because coverage works by annotating the source
+// code before compilation, compilation and test failures with
+// coverage enabled may report line numbers that don't correspond
+// to the original sources.
//
-// -covermode set,count,atomic
-// Set the mode for coverage analysis for the package[s]
-// being tested. The default is "set" unless -race is enabled,
-// in which case it is "atomic".
-// The values:
-// set: bool: does this statement run?
-// count: int: how many times does this statement run?
-// atomic: int: count, but correct in multithreaded tests;
-// significantly more expensive.
-// Sets -cover.
+// -covermode set,count,atomic
+// Set the mode for coverage analysis for the package[s]
+// being tested. The default is "set" unless -race is enabled,
+// in which case it is "atomic".
+// The values:
+// set: bool: does this statement run?
+// count: int: how many times does this statement run?
+// atomic: int: count, but correct in multithreaded tests;
+// significantly more expensive.
+// Sets -cover.
//
-// -coverpkg pattern1,pattern2,pattern3
-// Apply coverage analysis in each test to packages matching the patterns.
-// The default is for each test to analyze only the package being tested.
-// See 'go help packages' for a description of package patterns.
-// Sets -cover.
+// -coverpkg pattern1,pattern2,pattern3
+// Apply coverage analysis in each test to packages matching the patterns.
+// The default is for each test to analyze only the package being tested.
+// See 'go help packages' for a description of package patterns.
+// Sets -cover.
//
-// -cpu 1,2,4
-// Specify a list of GOMAXPROCS values for which the tests, benchmarks or
-// fuzz tests should be executed. The default is the current value
-// of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz.
+// -cpu 1,2,4
+// Specify a list of GOMAXPROCS values for which the tests, benchmarks or
+// fuzz tests should be executed. The default is the current value
+// of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz.
//
-// -failfast
-// Do not start new tests after the first test failure.
+// -failfast
+// Do not start new tests after the first test failure.
//
-// -fuzz regexp
-// Run the fuzz test matching the regular expression. When specified,
-// the command line argument must match exactly one package within the
-// main module, and regexp must match exactly one fuzz test within
-// that package. Fuzzing will occur after tests, benchmarks, seed corpora
-// of other fuzz tests, and examples have completed. See the Fuzzing
-// section of the testing package documentation for details.
+// -fuzz regexp
+// Run the fuzz test matching the regular expression. When specified,
+// the command line argument must match exactly one package within the
+// main module, and regexp must match exactly one fuzz test within
+// that package. Fuzzing will occur after tests, benchmarks, seed corpora
+// of other fuzz tests, and examples have completed. See the Fuzzing
+// section of the testing package documentation for details.
//
-// -fuzztime t
-// Run enough iterations of the fuzz target during fuzzing to take t,
-// specified as a time.Duration (for example, -fuzztime 1h30s).
-// The default is to run forever.
-// The special syntax Nx means to run the fuzz target N times
-// (for example, -fuzztime 1000x).
+// -fuzztime t
+// Run enough iterations of the fuzz target during fuzzing to take t,
+// specified as a time.Duration (for example, -fuzztime 1h30s).
+// The default is to run forever.
+// The special syntax Nx means to run the fuzz target N times
+// (for example, -fuzztime 1000x).
//
-// -fuzzminimizetime t
-// Run enough iterations of the fuzz target during each minimization
-// attempt to take t, as specified as a time.Duration (for example,
-// -fuzzminimizetime 30s).
-// The default is 60s.
-// The special syntax Nx means to run the fuzz target N times
-// (for example, -fuzzminimizetime 100x).
+// -fuzzminimizetime t
+// Run enough iterations of the fuzz target during each minimization
+// attempt to take t, as specified as a time.Duration (for example,
+// -fuzzminimizetime 30s).
+// The default is 60s.
+// The special syntax Nx means to run the fuzz target N times
+// (for example, -fuzzminimizetime 100x).
//
-// -json
-// Log verbose output and test results in JSON. This presents the
-// same information as the -v flag in a machine-readable format.
+// -json
+// Log verbose output and test results in JSON. This presents the
+// same information as the -v flag in a machine-readable format.
//
-// -list regexp
-// List tests, benchmarks, fuzz tests, or examples matching the regular
-// expression. No tests, benchmarks, fuzz tests, or examples will be run.
-// This will only list top-level tests. No subtest or subbenchmarks will be
-// shown.
+// -list regexp
+// List tests, benchmarks, fuzz tests, or examples matching the regular
+// expression. No tests, benchmarks, fuzz tests, or examples will be run.
+// This will only list top-level tests. No subtest or subbenchmarks will be
+// shown.
//
-// -parallel n
-// Allow parallel execution of test functions that call t.Parallel, and
-// fuzz targets that call t.Parallel when running the seed corpus.
-// The value of this flag is the maximum number of tests to run
-// simultaneously.
-// While fuzzing, the value of this flag is the maximum number of
-// subprocesses that may call the fuzz function simultaneously, regardless of
-// whether T.Parallel is called.
-// By default, -parallel is set to the value of GOMAXPROCS.
-// Setting -parallel to values higher than GOMAXPROCS may cause degraded
-// performance due to CPU contention, especially when fuzzing.
-// Note that -parallel only applies within a single test binary.
-// The 'go test' command may run tests for different packages
-// in parallel as well, according to the setting of the -p flag
-// (see 'go help build').
+// -parallel n
+// Allow parallel execution of test functions that call t.Parallel, and
+// fuzz targets that call t.Parallel when running the seed corpus.
+// The value of this flag is the maximum number of tests to run
+// simultaneously.
+// While fuzzing, the value of this flag is the maximum number of
+// subprocesses that may call the fuzz function simultaneously, regardless of
+// whether T.Parallel is called.
+// By default, -parallel is set to the value of GOMAXPROCS.
+// Setting -parallel to values higher than GOMAXPROCS may cause degraded
+// performance due to CPU contention, especially when fuzzing.
+// Note that -parallel only applies within a single test binary.
+// The 'go test' command may run tests for different packages
+// in parallel as well, according to the setting of the -p flag
+// (see 'go help build').
//
-// -run regexp
-// Run only those tests, examples, and fuzz tests matching the regular
-// expression. For tests, the regular expression is split by unbracketed
-// slash (/) characters into a sequence of regular expressions, and each
-// part of a test's identifier must match the corresponding element in
-// the sequence, if any. Note that possible parents of matches are
-// run too, so that -run=X/Y matches and runs and reports the result
-// of all tests matching X, even those without sub-tests matching Y,
-// because it must run them to look for those sub-tests.
+// -run regexp
+// Run only those tests, examples, and fuzz tests matching the regular
+// expression. For tests, the regular expression is split by unbracketed
+// slash (/) characters into a sequence of regular expressions, and each
+// part of a test's identifier must match the corresponding element in
+// the sequence, if any. Note that possible parents of matches are
+// run too, so that -run=X/Y matches and runs and reports the result
+// of all tests matching X, even those without sub-tests matching Y,
+// because it must run them to look for those sub-tests.
//
-// -short
-// Tell long-running tests to shorten their run time.
-// It is off by default but set during all.bash so that installing
-// the Go tree can run a sanity check but not spend time running
-// exhaustive tests.
+// -short
+// Tell long-running tests to shorten their run time.
+// It is off by default but set during all.bash so that installing
+// the Go tree can run a sanity check but not spend time running
+// exhaustive tests.
//
-// -shuffle off,on,N
-// Randomize the execution order of tests and benchmarks.
-// It is off by default. If -shuffle is set to on, then it will seed
-// the randomizer using the system clock. If -shuffle is set to an
-// integer N, then N will be used as the seed value. In both cases,
-// the seed will be reported for reproducibility.
+// -shuffle off,on,N
+// Randomize the execution order of tests and benchmarks.
+// It is off by default. If -shuffle is set to on, then it will seed
+// the randomizer using the system clock. If -shuffle is set to an
+// integer N, then N will be used as the seed value. In both cases,
+// the seed will be reported for reproducibility.
//
-// -timeout d
-// If a test binary runs longer than duration d, panic.
-// If d is 0, the timeout is disabled.
-// The default is 10 minutes (10m).
+// -timeout d
+// If a test binary runs longer than duration d, panic.
+// If d is 0, the timeout is disabled.
+// The default is 10 minutes (10m).
//
-// -v
-// Verbose output: log all tests as they are run. Also print all
-// text from Log and Logf calls even if the test succeeds.
+// -v
+// Verbose output: log all tests as they are run. Also print all
+// text from Log and Logf calls even if the test succeeds.
//
-// -vet list
-// Configure the invocation of "go vet" during "go test"
-// to use the comma-separated list of vet checks.
-// If list is empty, "go test" runs "go vet" with a curated list of
-// checks believed to be always worth addressing.
-// If list is "off", "go test" does not run "go vet" at all.
+// -vet list
+// Configure the invocation of "go vet" during "go test"
+// to use the comma-separated list of vet checks.
+// If list is empty, "go test" runs "go vet" with a curated list of
+// checks believed to be always worth addressing.
+// If list is "off", "go test" does not run "go vet" at all.
//
// The following flags are also recognized by 'go test' and can be used to
// profile the tests during execution:
//
-// -benchmem
-// Print memory allocation statistics for benchmarks.
+// -benchmem
+// Print memory allocation statistics for benchmarks.
//
-// -blockprofile block.out
-// Write a goroutine blocking profile to the specified file
-// when all tests are complete.
-// Writes test binary as -c would.
+// -blockprofile block.out
+// Write a goroutine blocking profile to the specified file
+// when all tests are complete.
+// Writes test binary as -c would.
//
-// -blockprofilerate n
-// Control the detail provided in goroutine blocking profiles by
-// calling runtime.SetBlockProfileRate with n.
-// See 'go doc runtime.SetBlockProfileRate'.
-// The profiler aims to sample, on average, one blocking event every
-// n nanoseconds the program spends blocked. By default,
-// if -test.blockprofile is set without this flag, all blocking events
-// are recorded, equivalent to -test.blockprofilerate=1.
+// -blockprofilerate n
+// Control the detail provided in goroutine blocking profiles by
+// calling runtime.SetBlockProfileRate with n.
+// See 'go doc runtime.SetBlockProfileRate'.
+// The profiler aims to sample, on average, one blocking event every
+// n nanoseconds the program spends blocked. By default,
+// if -test.blockprofile is set without this flag, all blocking events
+// are recorded, equivalent to -test.blockprofilerate=1.
//
-// -coverprofile cover.out
-// Write a coverage profile to the file after all tests have passed.
-// Sets -cover.
+// -coverprofile cover.out
+// Write a coverage profile to the file after all tests have passed.
+// Sets -cover.
//
-// -cpuprofile cpu.out
-// Write a CPU profile to the specified file before exiting.
-// Writes test binary as -c would.
+// -cpuprofile cpu.out
+// Write a CPU profile to the specified file before exiting.
+// Writes test binary as -c would.
//
-// -memprofile mem.out
-// Write an allocation profile to the file after all tests have passed.
-// Writes test binary as -c would.
+// -memprofile mem.out
+// Write an allocation profile to the file after all tests have passed.
+// Writes test binary as -c would.
//
-// -memprofilerate n
-// Enable more precise (and expensive) memory allocation profiles by
-// setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
-// To profile all memory allocations, use -test.memprofilerate=1.
+// -memprofilerate n
+// Enable more precise (and expensive) memory allocation profiles by
+// setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
+// To profile all memory allocations, use -test.memprofilerate=1.
//
-// -mutexprofile mutex.out
-// Write a mutex contention profile to the specified file
-// when all tests are complete.
-// Writes test binary as -c would.
+// -mutexprofile mutex.out
+// Write a mutex contention profile to the specified file
+// when all tests are complete.
+// Writes test binary as -c would.
//
-// -mutexprofilefraction n
-// Sample 1 in n stack traces of goroutines holding a
-// contended mutex.
+// -mutexprofilefraction n
+// Sample 1 in n stack traces of goroutines holding a
+// contended mutex.
//
-// -outputdir directory
-// Place output files from profiling in the specified directory,
-// by default the directory in which "go test" is running.
+// -outputdir directory
+// Place output files from profiling in the specified directory,
+// by default the directory in which "go test" is running.
//
-// -trace trace.out
-// Write an execution trace to the specified file before exiting.
+// -trace trace.out
+// Write an execution trace to the specified file before exiting.
//
// Each of these flags is also recognized with an optional 'test.' prefix,
// as in -test.v. When invoking the generated test binary (the result of
@@ -3075,11 +3041,11 @@
//
// For instance, the command
//
-// go test -v -myflag testdata -cpuprofile=prof.out -x
+// go test -v -myflag testdata -cpuprofile=prof.out -x
//
// will compile the test binary and then run it as
//
-// pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
+// pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
//
// (The -x flag is removed because it applies only to the go command's
// execution, not to the test itself.)
@@ -3114,27 +3080,26 @@
//
// For instance, the command
//
-// go test -v -args -x -v
+// go test -v -args -x -v
//
// will compile the test binary and then run it as
//
-// pkg.test -test.v -x -v
+// pkg.test -test.v -x -v
//
// Similarly,
//
-// go test -args math
+// go test -args math
//
// will compile the test binary and then run it as
//
-// pkg.test math
+// pkg.test math
//
// In the first example, the -x and the second -v are passed through to the
// test binary unchanged and with no effect on the go command itself.
// In the second example, the argument math is passed through to the test
// binary, instead of being interpreted as the package list.
//
-//
-// Testing functions
+// # Testing functions
//
// The 'go test' command expects to find test, benchmark, and example functions
// in the "*_test.go" files corresponding to the package under test.
@@ -3142,15 +3107,15 @@
// A test function is one named TestXxx (where Xxx does not start with a
// lower case letter) and should have the signature,
//
-// func TestXxx(t *testing.T) { ... }
+// func TestXxx(t *testing.T) { ... }
//
// A benchmark function is one named BenchmarkXxx and should have the signature,
//
-// func BenchmarkXxx(b *testing.B) { ... }
+// func BenchmarkXxx(b *testing.B) { ... }
//
// A fuzz test is one named FuzzXxx and should have the signature,
//
-// func FuzzXxx(f *testing.F) { ... }
+// func FuzzXxx(f *testing.F) { ... }
//
// An example function is similar to a test function but, instead of using
// *testing.T to report success or failure, prints output to os.Stdout.
@@ -3169,25 +3134,25 @@
//
// Here is an example of an example:
//
-// func ExamplePrintln() {
-// Println("The output of\nthis example.")
-// // Output: The output of
-// // this example.
-// }
+// func ExamplePrintln() {
+// Println("The output of\nthis example.")
+// // Output: The output of
+// // this example.
+// }
//
// Here is another example where the ordering of the output is ignored:
//
-// func ExamplePerm() {
-// for _, value := range Perm(4) {
-// fmt.Println(value)
-// }
+// func ExamplePerm() {
+// for _, value := range Perm(4) {
+// fmt.Println(value)
+// }
//
-// // Unordered output: 4
-// // 2
-// // 1
-// // 3
-// // 0
-// }
+// // Unordered output: 4
+// // 2
+// // 1
+// // 3
+// // 0
+// }
//
// The entire test file is presented as the example when it contains a single
// example function, at least one other function, type, variable, or constant
@@ -3195,8 +3160,7 @@
//
// See the documentation of the testing package for more information.
//
-//
-// Controlling version control with GOVCS
+// # Controlling version control with GOVCS
//
// The 'go get' command can run version control commands like git
// to download imported code. This functionality is critical to the decentralized
@@ -3245,7 +3209,7 @@
//
// For example, consider:
//
-// GOVCS=github.com:git,evil.com:off,*:git|hg
+// GOVCS=github.com:git,evil.com:off,*:git|hg
//
// With this setting, code with a module or import path beginning with
// github.com/ can only use git; paths on evil.com cannot use any version
@@ -3262,14 +3226,12 @@
//
// To allow unfettered use of any version control system for any package, use:
//
-// GOVCS=*:all
+// GOVCS=*:all
//
// To disable all use of version control, use:
//
-// GOVCS=*:off
+// GOVCS=*:off
//
// The 'go env -w' command (see 'go help env') can be used to set the GOVCS
// variable for future go command invocations.
-//
-//
package main
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index b4387dc078..905dd68274 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -78,6 +78,10 @@ func tooSlow(t *testing.T) {
// (temp) directory.
var testGOROOT string
+// testGOROOT_FINAL is the GOROOT_FINAL with which the test binary is assumed to
+// have been built.
+var testGOROOT_FINAL = os.Getenv("GOROOT_FINAL")
+
var testGOCACHE string
var testGo string
diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go
index 702dc2a14a..b4181b1e44 100644
--- a/src/cmd/go/internal/bug/bug.go
+++ b/src/cmd/go/internal/bug/bug.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package bug implements the ``go bug'' command.
+// Package bug implements the “go bug” command.
package bug
import (
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
index a11a1a7655..c6ddfe55d5 100644
--- a/src/cmd/go/internal/cfg/cfg.go
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -44,9 +44,9 @@ func exeSuffix() string {
// These are general "build flags" used by build and other commands.
var (
- BuildA bool // -a flag
- BuildBuildmode string // -buildmode flag
- BuildBuildvcs bool // -buildvcs flag
+ BuildA bool // -a flag
+ BuildBuildmode string // -buildmode flag
+ BuildBuildvcs = "auto" // -buildvcs flag: "true", "false", or "auto"
BuildContext = defaultContext()
BuildMod string // -mod flag
BuildModExplicit bool // whether -mod was set explicitly
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
index dc93cdf598..8564411fb6 100644
--- a/src/cmd/go/internal/clean/clean.go
+++ b/src/cmd/go/internal/clean/clean.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package clean implements the ``go clean'' command.
+// Package clean implements the “go clean” command.
package clean
import (
diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go
index 7741a9022c..3b6cd94799 100644
--- a/src/cmd/go/internal/doc/doc.go
+++ b/src/cmd/go/internal/doc/doc.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package doc implements the ``go doc'' command.
+// Package doc implements the “go doc” command.
package doc
import (
diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
index fcabc8d1c7..529351dfbd 100644
--- a/src/cmd/go/internal/envcmd/env.go
+++ b/src/cmd/go/internal/envcmd/env.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package envcmd implements the ``go env'' command.
+// Package envcmd implements the “go env” command.
package envcmd
import (
@@ -184,15 +184,23 @@ func ExtraEnvVarsCostly() []cfg.EnvVar {
}
cmd := b.GccCmd(".", "")
+ join := func(s []string) string {
+ q, err := quoted.Join(s)
+ if err != nil {
+ return strings.Join(s, " ")
+ }
+ return q
+ }
+
return []cfg.EnvVar{
// Note: Update the switch in runEnv below when adding to this list.
- {Name: "CGO_CFLAGS", Value: strings.Join(cflags, " ")},
- {Name: "CGO_CPPFLAGS", Value: strings.Join(cppflags, " ")},
- {Name: "CGO_CXXFLAGS", Value: strings.Join(cxxflags, " ")},
- {Name: "CGO_FFLAGS", Value: strings.Join(fflags, " ")},
- {Name: "CGO_LDFLAGS", Value: strings.Join(ldflags, " ")},
+ {Name: "CGO_CFLAGS", Value: join(cflags)},
+ {Name: "CGO_CPPFLAGS", Value: join(cppflags)},
+ {Name: "CGO_CXXFLAGS", Value: join(cxxflags)},
+ {Name: "CGO_FFLAGS", Value: join(fflags)},
+ {Name: "CGO_LDFLAGS", Value: join(ldflags)},
{Name: "PKG_CONFIG", Value: b.PkgconfigCmd()},
- {Name: "GOGCCFLAGS", Value: strings.Join(cmd[3:], " ")},
+ {Name: "GOGCCFLAGS", Value: join(cmd[3:])},
}
}
diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go
index d8ba353de6..3705b30ef9 100644
--- a/src/cmd/go/internal/fix/fix.go
+++ b/src/cmd/go/internal/fix/fix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package fix implements the ``go fix'' command.
+// Package fix implements the “go fix” command.
package fix
import (
diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go
index 19656eab7f..3dc29d40b2 100644
--- a/src/cmd/go/internal/fmtcmd/fmt.go
+++ b/src/cmd/go/internal/fmtcmd/fmt.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package fmtcmd implements the ``go fmt'' command.
+// Package fmtcmd implements the “go fmt” command.
package fmtcmd
import (
diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go
index 54ccfe78f2..a46f4f8908 100644
--- a/src/cmd/go/internal/generate/generate.go
+++ b/src/cmd/go/internal/generate/generate.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package generate implements the ``go generate'' command.
+// Package generate implements the “go generate” command.
package generate
import (
@@ -84,6 +84,9 @@ Go generate sets several variables when it runs the generator:
The line number of the directive in the source file.
$GOPACKAGE
The name of the package of the file containing the directive.
+ $GOROOT
+ The GOROOT directory for the 'go' command that invoked the
+ generator, containing the Go toolchain and standard library.
$DOLLAR
A dollar sign.
@@ -326,6 +329,7 @@ func isGoGenerate(buf []byte) bool {
// single go:generate command.
func (g *Generator) setEnv() {
g.env = []string{
+ "GOROOT=" + cfg.GOROOT,
"GOARCH=" + cfg.BuildContext.GOARCH,
"GOOS=" + cfg.BuildContext.GOOS,
"GOFILE=" + g.file,
diff --git a/src/cmd/go/internal/generate/generate_test.go b/src/cmd/go/internal/generate/generate_test.go
index b546218a3c..15b1279f36 100644
--- a/src/cmd/go/internal/generate/generate_test.go
+++ b/src/cmd/go/internal/generate/generate_test.go
@@ -78,11 +78,11 @@ var defEnvMap = map[string]string{
// TestGenerateCommandShortHand - similar to TestGenerateCommandParse,
// except:
-// 1. if the result starts with -command, record that shorthand
-// before moving on to the next test.
-// 2. If a source line number is specified, set that in the parser
-// before executing the test. i.e., execute the split as if it
-// processing that source line.
+// 1. if the result starts with -command, record that shorthand
+// before moving on to the next test.
+// 2. If a source line number is specified, set that in the parser
+// before executing the test. i.e., execute the split as if it were
+// processing that source line.
func TestGenerateCommandShorthand(t *testing.T) {
g := &Generator{
r: nil, // Unused here.
@@ -216,11 +216,11 @@ var splitTestsLines = []splitTestWithLine{
// TestGenerateCommandShortHand - similar to TestGenerateCommandParse,
// except:
-// 1. if the result starts with -command, record that shorthand
-// before moving on to the next test.
-// 2. If a source line number is specified, set that in the parser
-// before executing the test. i.e., execute the split as if it
-// processing that source line.
+// 1. if the result starts with -command, record that shorthand
+// before moving on to the next test.
+// 2. If a source line number is specified, set that in the parser
+// before executing the test. i.e., execute the split as if it were
+// processing that source line.
func TestGenerateCommandShortHand2(t *testing.T) {
g := &Generator{
r: nil, // Unused here.
diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go
index 8cf8fe6645..1bb67bcf51 100644
--- a/src/cmd/go/internal/get/get.go
+++ b/src/cmd/go/internal/get/get.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package get implements the ``go get'' command.
+// Package get implements the “go get” command.
package get
import (
diff --git a/src/cmd/go/internal/help/help.go b/src/cmd/go/internal/help/help.go
index 2a07d2423b..f73097af84 100644
--- a/src/cmd/go/internal/help/help.go
+++ b/src/cmd/go/internal/help/help.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package help implements the ``go help'' command.
+// Package help implements the “go help” command.
package help
import (
diff --git a/src/cmd/go/internal/imports/build.go b/src/cmd/go/internal/imports/build.go
index 10e90fc216..53fa1967f7 100644
--- a/src/cmd/go/internal/imports/build.go
+++ b/src/cmd/go/internal/imports/build.go
@@ -215,7 +215,9 @@ func matchTag(name string, tags map[string]bool, prefer bool) bool {
}
// eval is like
+//
// x.Eval(func(tag string) bool { return matchTag(tag, tags) })
+//
// except that it implements the special case for tags["*"] meaning
// all tags are both true and false at the same time.
func eval(x constraint.Expr, tags map[string]bool, prefer bool) bool {
@@ -236,17 +238,18 @@ func eval(x constraint.Expr, tags map[string]bool, prefer bool) bool {
// suffix which does not match the current system.
// The recognized name formats are:
//
-// name_$(GOOS).*
-// name_$(GOARCH).*
-// name_$(GOOS)_$(GOARCH).*
-// name_$(GOOS)_test.*
-// name_$(GOARCH)_test.*
-// name_$(GOOS)_$(GOARCH)_test.*
+// name_$(GOOS).*
+// name_$(GOARCH).*
+// name_$(GOOS)_$(GOARCH).*
+// name_$(GOOS)_test.*
+// name_$(GOARCH)_test.*
+// name_$(GOOS)_$(GOARCH)_test.*
//
// Exceptions:
-// if GOOS=android, then files with GOOS=linux are also matched.
-// if GOOS=illumos, then files with GOOS=solaris are also matched.
-// if GOOS=ios, then files with GOOS=darwin are also matched.
+//
+// if GOOS=android, then files with GOOS=linux are also matched.
+// if GOOS=illumos, then files with GOOS=solaris are also matched.
+// if GOOS=ios, then files with GOOS=darwin are also matched.
//
// If tags["*"] is true, then MatchFile will consider all possible
// GOOS and GOARCH to be available and will consequently
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index 5fc33989cd..17864e1da7 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package list implements the ``go list'' command.
+// Package list implements the “go list” command.
package list
import (
@@ -567,7 +567,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
pkgOpts := load.PackageOpts{
IgnoreImports: *listFind,
ModResolveTests: *listTest,
- LoadVCS: cfg.BuildBuildvcs,
+ LoadVCS: true,
}
pkgs := load.PackagesAndErrors(ctx, pkgOpts, args)
if !*listE {
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index a1cfcad826..10799ad516 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -17,6 +17,7 @@ import (
"internal/goroot"
"io/fs"
"os"
+ "os/exec"
"path"
pathpkg "path"
"path/filepath"
@@ -196,9 +197,9 @@ func (p *Package) Desc() string {
// IsTestOnly reports whether p is a test-only package.
//
// A “test-only” package is one that:
-// - is a test-only variant of an ordinary package, or
-// - is a synthesized "main" package for a test binary, or
-// - contains only _test.go files.
+// - is a test-only variant of an ordinary package, or
+// - is a synthesized "main" package for a test binary, or
+// - contains only _test.go files.
func (p *Package) IsTestOnly() bool {
return p.ForTest != "" ||
p.Internal.TestmainGo != nil ||
@@ -2062,7 +2063,8 @@ func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[st
// then there may be other things lying around, like symbolic links or .git directories.)
var list []string
for _, file := range match {
- rel := filepath.ToSlash(file[len(pkgdir)+1:]) // file, relative to p.Dir
+ // relative path to p.Dir which begins without prefix slash
+ rel := filepath.ToSlash(str.TrimFilePathPrefix(file, pkgdir))
what := "file"
info, err := fsys.Lstat(file)
@@ -2112,7 +2114,7 @@ func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[st
if err != nil {
return err
}
- rel := filepath.ToSlash(path[len(pkgdir)+1:])
+ rel := filepath.ToSlash(str.TrimFilePathPrefix(path, pkgdir))
name := info.Name()
if path != file && (isBadEmbedName(name) || ((name[0] == '.' || name[0] == '_') && !all)) {
// Ignore bad names, assuming they won't go into modules.
@@ -2377,7 +2379,7 @@ func (p *Package) setBuildInfo(includeVCS bool) {
var vcsCmd *vcs.Cmd
var err error
const allowNesting = true
- if includeVCS && p.Module != nil && p.Module.Version == "" && !p.Standard && !p.IsTestOnly() {
+ if includeVCS && cfg.BuildBuildvcs != "false" && p.Module != nil && p.Module.Version == "" && !p.Standard && !p.IsTestOnly() {
repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "", allowNesting)
if err != nil && !errors.Is(err, os.ErrNotExist) {
setVCSError(err)
@@ -2389,7 +2391,14 @@ func (p *Package) setBuildInfo(includeVCS bool) {
// repository containing the working directory. Don't include VCS info.
// If the repo contains the module or vice versa, but they are not
// the same directory, it's likely an error (see below).
- repoDir, vcsCmd = "", nil
+ goto omitVCS
+ }
+ if cfg.BuildBuildvcs == "auto" && vcsCmd != nil && vcsCmd.Cmd != "" {
+ if _, err := exec.LookPath(vcsCmd.Cmd); err != nil {
+ // We found a repository, but the required VCS tool is not present.
+ // "-buildvcs=auto" means that we should silently drop the VCS metadata.
+ goto omitVCS
+ }
}
}
if repoDir != "" && vcsCmd.Status != nil {
@@ -2403,8 +2412,11 @@ func (p *Package) setBuildInfo(includeVCS bool) {
return
}
if pkgRepoDir != repoDir {
- setVCSError(fmt.Errorf("main package is in repository %q but current directory is in repository %q", pkgRepoDir, repoDir))
- return
+ if cfg.BuildBuildvcs != "auto" {
+ setVCSError(fmt.Errorf("main package is in repository %q but current directory is in repository %q", pkgRepoDir, repoDir))
+ return
+ }
+ goto omitVCS
}
modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "", allowNesting)
if err != nil {
@@ -2412,8 +2424,11 @@ func (p *Package) setBuildInfo(includeVCS bool) {
return
}
if modRepoDir != repoDir {
- setVCSError(fmt.Errorf("main module is in repository %q but current directory is in repository %q", modRepoDir, repoDir))
- return
+ if cfg.BuildBuildvcs != "auto" {
+ setVCSError(fmt.Errorf("main module is in repository %q but current directory is in repository %q", modRepoDir, repoDir))
+ return
+ }
+ goto omitVCS
}
type vcsStatusError struct {
@@ -2440,6 +2455,7 @@ func (p *Package) setBuildInfo(includeVCS bool) {
}
appendSetting("vcs.modified", strconv.FormatBool(st.Uncommitted))
}
+omitVCS:
p.Internal.BuildInfo = info.String()
}
diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go
index 39f1131a43..3780f358f4 100644
--- a/src/cmd/go/internal/load/test.go
+++ b/src/cmd/go/internal/load/test.go
@@ -76,9 +76,9 @@ func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *T
}
// TestPackagesAndErrors returns three packages:
-// - pmain, the package main corresponding to the test binary (running tests in ptest and pxtest).
-// - ptest, the package p compiled with added "package p" test files.
-// - pxtest, the result of compiling any "package p_test" (external) test files.
+// - pmain, the package main corresponding to the test binary (running tests in ptest and pxtest).
+// - ptest, the package p compiled with added "package p" test files.
+// - pxtest, the result of compiling any "package p_test" (external) test files.
//
// If the package has no "package p_test" test files, pxtest will be nil.
// If the non-test compilation of package p can be reused
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
index 35669388e0..a2ce794b96 100644
--- a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
+++ b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
@@ -17,9 +17,9 @@ import (
// Opening an exclusive-use file returns an error.
// The expected error strings are:
//
-// - "open/create -- file is locked" (cwfs, kfs)
-// - "exclusive lock" (fossil)
-// - "exclusive use file already open" (ramfs)
+// - "open/create -- file is locked" (cwfs, kfs)
+// - "exclusive lock" (fossil)
+// - "exclusive use file already open" (ramfs)
var lockedErrStrings = [...]string{
"file is locked",
"exclusive lock",
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_test.go b/src/cmd/go/internal/lockedfile/lockedfile_test.go
index c9907db46c..79352bc8c7 100644
--- a/src/cmd/go/internal/lockedfile/lockedfile_test.go
+++ b/src/cmd/go/internal/lockedfile/lockedfile_test.go
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// js does not support inter-process file locking.
+//
//go:build !js
package lockedfile_test
diff --git a/src/cmd/go/internal/lockedfile/transform_test.go b/src/cmd/go/internal/lockedfile/transform_test.go
index 3c1caa334e..833cbf7879 100644
--- a/src/cmd/go/internal/lockedfile/transform_test.go
+++ b/src/cmd/go/internal/lockedfile/transform_test.go
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// js does not support inter-process file locking.
+//
//go:build !js
package lockedfile_test
diff --git a/src/cmd/go/internal/modcmd/mod.go b/src/cmd/go/internal/modcmd/mod.go
index d72d0cacd6..125ba336a0 100644
--- a/src/cmd/go/internal/modcmd/mod.go
+++ b/src/cmd/go/internal/modcmd/mod.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package modcmd implements the ``go mod'' command.
+// Package modcmd implements the “go mod” command.
package modcmd
import (
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 3d8463e892..08a474f61b 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package modget implements the module-aware ``go get'' command.
+// Package modget implements the module-aware “go get” command.
package modget
// The arguments to 'go get' are patterns with optional version queries, with
@@ -731,10 +731,10 @@ func (r *resolver) performWildcardQueries(ctx context.Context) {
}
// queryWildcard adds a candidate set to q for each module for which:
-// - some version of the module is already in the build list, and
-// - that module exists at some version matching q.version, and
-// - either the module path itself matches q.pattern, or some package within
-// the module at q.version matches q.pattern.
+// - some version of the module is already in the build list, and
+// - that module exists at some version matching q.version, and
+// - either the module path itself matches q.pattern, or some package within
+// the module at q.version matches q.pattern.
func (r *resolver) queryWildcard(ctx context.Context, q *query) {
// For wildcard patterns, modload.QueryPattern only identifies modules
// matching the prefix of the path before the wildcard. However, the build
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index 6f9072c8c4..5b8d6051f3 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -676,11 +676,11 @@ func updateWorkspaceRoots(ctx context.Context, rs *Requirements, add []module.Ve
// invariants of the go.mod file needed to support graph pruning for the given
// packages:
//
-// 1. For each package marked with pkgInAll, the module path that provided that
-// package is included as a root.
-// 2. For all packages, the module that provided that package either remains
-// selected at the same version or is upgraded by the dependencies of a
-// root.
+// 1. For each package marked with pkgInAll, the module path that provided that
+// package is included as a root.
+// 2. For all packages, the module that provided that package either remains
+// selected at the same version or is upgraded by the dependencies of a
+// root.
//
// If any module that provided a package has been upgraded above its previous
// version, the caller may need to reload and recompute the package graph.
@@ -769,17 +769,17 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, direct map[
// updatePrunedRoots returns a set of root requirements that maintains the
// invariants of the go.mod file needed to support graph pruning:
//
-// 1. The selected version of the module providing each package marked with
-// either pkgInAll or pkgIsRoot is included as a root.
-// Note that certain root patterns (such as '...') may explode the root set
-// to contain every module that provides any package imported (or merely
-// required) by any other module.
-// 2. Each root appears only once, at the selected version of its path
-// (if rs.graph is non-nil) or at the highest version otherwise present as a
-// root (otherwise).
-// 3. Every module path that appears as a root in rs remains a root.
-// 4. Every version in add is selected at its given version unless upgraded by
-// (the dependencies of) an existing root or another module in add.
+// 1. The selected version of the module providing each package marked with
+// either pkgInAll or pkgIsRoot is included as a root.
+// Note that certain root patterns (such as '...') may explode the root set
+// to contain every module that provides any package imported (or merely
+// required) by any other module.
+// 2. Each root appears only once, at the selected version of its path
+// (if rs.graph is non-nil) or at the highest version otherwise present as a
+// root (otherwise).
+// 3. Every module path that appears as a root in rs remains a root.
+// 4. Every version in add is selected at its given version unless upgraded by
+// (the dependencies of) an existing root or another module in add.
//
// The packages in pkgs are assumed to have been loaded from either the roots of
// rs or the modules selected in the graph of rs.
@@ -787,26 +787,26 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, direct map[
// The above invariants together imply the graph-pruning invariants for the
// go.mod file:
//
-// 1. (The import invariant.) Every module that provides a package transitively
-// imported by any package or test in the main module is included as a root.
-// This follows by induction from (1) and (3) above. Transitively-imported
-// packages loaded during this invocation are marked with pkgInAll (1),
-// and by hypothesis any transitively-imported packages loaded in previous
-// invocations were already roots in rs (3).
+// 1. (The import invariant.) Every module that provides a package transitively
+// imported by any package or test in the main module is included as a root.
+// This follows by induction from (1) and (3) above. Transitively-imported
+// packages loaded during this invocation are marked with pkgInAll (1),
+// and by hypothesis any transitively-imported packages loaded in previous
+// invocations were already roots in rs (3).
//
-// 2. (The argument invariant.) Every module that provides a package matching
-// an explicit package pattern is included as a root. This follows directly
-// from (1): packages matching explicit package patterns are marked with
-// pkgIsRoot.
+// 2. (The argument invariant.) Every module that provides a package matching
+// an explicit package pattern is included as a root. This follows directly
+// from (1): packages matching explicit package patterns are marked with
+// pkgIsRoot.
//
-// 3. (The completeness invariant.) Every module that contributed any package
-// to the build is required by either the main module or one of the modules
-// it requires explicitly. This invariant is left up to the caller, who must
-// not load packages from outside the module graph but may add roots to the
-// graph, but is facilited by (3). If the caller adds roots to the graph in
-// order to resolve missing packages, then updatePrunedRoots will retain them,
-// the selected versions of those roots cannot regress, and they will
-// eventually be written back to the main module's go.mod file.
+// 3. (The completeness invariant.) Every module that contributed any package
+// to the build is required by either the main module or one of the modules
+// it requires explicitly. This invariant is left up to the caller, who must
+// not load packages from outside the module graph but may add roots to the
+// graph, but is facilitated by (3). If the caller adds roots to the graph in
+// order to resolve missing packages, then updatePrunedRoots will retain them,
+// the selected versions of those roots cannot regress, and they will
+// eventually be written back to the main module's go.mod file.
//
// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
// detail.)
@@ -1162,14 +1162,14 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, direct ma
//
// The roots are updated such that:
//
-// 1. The selected version of every module path in direct is included as a root
-// (if it is not "none").
-// 2. Each root is the selected version of its path. (We say that such a root
-// set is “consistent”.)
-// 3. Every version selected in the graph of rs remains selected unless upgraded
-// by a dependency in add.
-// 4. Every version in add is selected at its given version unless upgraded by
-// (the dependencies of) an existing root or another module in add.
+// 1. The selected version of every module path in direct is included as a root
+// (if it is not "none").
+// 2. Each root is the selected version of its path. (We say that such a root
+// set is “consistent”.)
+// 3. Every version selected in the graph of rs remains selected unless upgraded
+// by a dependency in add.
+// 4. Every version in add is selected at its given version unless upgraded by
+// (the dependencies of) an existing root or another module in add.
func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
mg, err := rs.Graph(ctx)
if err != nil {
diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go
index 0f37e3b2e9..c556664c35 100644
--- a/src/cmd/go/internal/modload/edit.go
+++ b/src/cmd/go/internal/modload/edit.go
@@ -16,20 +16,20 @@ import (
// editRequirements returns an edited version of rs such that:
//
-// 1. Each module version in mustSelect is selected.
+// 1. Each module version in mustSelect is selected.
//
-// 2. Each module version in tryUpgrade is upgraded toward the indicated
-// version as far as can be done without violating (1).
+// 2. Each module version in tryUpgrade is upgraded toward the indicated
+// version as far as can be done without violating (1).
//
-// 3. Each module version in rs.rootModules (or rs.graph, if rs is unpruned)
-// is downgraded from its original version only to the extent needed to
-// satisfy (1), or upgraded only to the extent needed to satisfy (1) and
-// (2).
+// 3. Each module version in rs.rootModules (or rs.graph, if rs is unpruned)
+// is downgraded from its original version only to the extent needed to
+// satisfy (1), or upgraded only to the extent needed to satisfy (1) and
+// (2).
//
-// 4. No module is upgraded above the maximum version of its path found in the
-// dependency graph of rs, the combined dependency graph of the versions in
-// mustSelect, or the dependencies of each individual module version in
-// tryUpgrade.
+// 4. No module is upgraded above the maximum version of its path found in the
+// dependency graph of rs, the combined dependency graph of the versions in
+// mustSelect, or the dependencies of each individual module version in
+// tryUpgrade.
//
// Generally, the module versions in mustSelect are due to the module or a
// package within the module matching an explicit command line argument to 'go
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index c170699535..e85a33dd50 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -1222,16 +1222,16 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
//
// In particular:
//
-// - Modules that provide packages directly imported from the main module are
-// marked as direct, and are promoted to explicit roots. If a needed root
-// cannot be promoted due to -mod=readonly or -mod=vendor, the importing
-// package is marked with an error.
+// - Modules that provide packages directly imported from the main module are
+// marked as direct, and are promoted to explicit roots. If a needed root
+// cannot be promoted due to -mod=readonly or -mod=vendor, the importing
+// package is marked with an error.
//
-// - If ld scanned the "all" pattern independent of build constraints, it is
-// guaranteed to have seen every direct import. Module dependencies that did
-// not provide any directly-imported package are then marked as indirect.
+// - If ld scanned the "all" pattern independent of build constraints, it is
+// guaranteed to have seen every direct import. Module dependencies that did
+// not provide any directly-imported package are then marked as indirect.
//
-// - Root dependencies are updated to their selected versions.
+// - Root dependencies are updated to their selected versions.
//
// The "changed" return value reports whether the update changed the selected
// version of any module that either provided a loaded package or may now
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index 33808ea109..27af78d99e 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -33,19 +33,27 @@ import (
// The version must take one of the following forms:
//
// - the literal string "latest", denoting the latest available, allowed
-// tagged version, with non-prereleases preferred over prereleases.
-// If there are no tagged versions in the repo, latest returns the most
-// recent commit.
+//
+// tagged version, with non-prereleases preferred over prereleases.
+// If there are no tagged versions in the repo, latest returns the most
+// recent commit.
+//
// - the literal string "upgrade", equivalent to "latest" except that if
-// current is a newer version, current will be returned (see below).
+//
+// current is a newer version, current will be returned (see below).
+//
// - the literal string "patch", denoting the latest available tagged version
-// with the same major and minor number as current (see below).
+//
+// with the same major and minor number as current (see below).
+//
// - v1, denoting the latest available tagged version v1.x.x.
// - v1.2, denoting the latest available tagged version v1.2.x.
// - v1.2.3, a semantic version string denoting that tagged version.
// - v1.2.3, >=v1.2.3,
-// denoting the version closest to the target and satisfying the given operator,
-// with non-prereleases preferred over prereleases.
+//
+// denoting the version closest to the target and satisfying the given operator,
+// with non-prereleases preferred over prereleases.
+//
// - a repository commit identifier or tag, denoting that commit.
//
// current denotes the currently-selected version of the module; it may be
@@ -433,9 +441,9 @@ func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
// filterVersions classifies versions into releases and pre-releases, filtering
// out:
-// 1. versions that do not satisfy the 'allowed' predicate, and
-// 2. "+incompatible" versions, if a compatible one satisfies the predicate
-// and the incompatible version is not preferred.
+// 1. versions that do not satisfy the 'allowed' predicate, and
+// 2. "+incompatible" versions, if a compatible one satisfies the predicate
+// and the incompatible version is not preferred.
//
// If the allowed predicate returns an error not equivalent to ErrDisallowed,
// filterVersions returns that error.
diff --git a/src/cmd/go/internal/modload/stat_openfile.go b/src/cmd/go/internal/modload/stat_openfile.go
index ff7c124af5..5773073d90 100644
--- a/src/cmd/go/internal/modload/stat_openfile.go
+++ b/src/cmd/go/internal/modload/stat_openfile.go
@@ -8,7 +8,7 @@
// are checked by the server and group information is not known to the client,
// access must open the file to check permissions.”
//
-// aix and js,wasm are similar, in that they do not define syscall.Access.
+// js,wasm is similar, in that it does not define syscall.Access.
package modload
diff --git a/src/cmd/go/internal/robustio/robustio.go b/src/cmd/go/internal/robustio/robustio.go
index ce3dbbde6d..15b33773cf 100644
--- a/src/cmd/go/internal/robustio/robustio.go
+++ b/src/cmd/go/internal/robustio/robustio.go
@@ -42,9 +42,9 @@ func RemoveAll(path string) error {
// in this package attempt to mitigate.
//
// Errors considered ephemeral include:
-// - syscall.ERROR_ACCESS_DENIED
-// - syscall.ERROR_FILE_NOT_FOUND
-// - internal/syscall/windows.ERROR_SHARING_VIOLATION
+// - syscall.ERROR_ACCESS_DENIED
+// - syscall.ERROR_FILE_NOT_FOUND
+// - internal/syscall/windows.ERROR_SHARING_VIOLATION
//
// This set may be expanded in the future; programs must not rely on the
// non-ephemerality of any given error.
diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go
index 35c5783373..ebe1611819 100644
--- a/src/cmd/go/internal/run/run.go
+++ b/src/cmd/go/internal/run/run.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package run implements the ``go run'' command.
+// Package run implements the “go run” command.
package run
import (
diff --git a/src/cmd/go/internal/str/path.go b/src/cmd/go/internal/str/path.go
index 0c8aaeaca1..a69e171f8c 100644
--- a/src/cmd/go/internal/str/path.go
+++ b/src/cmd/go/internal/str/path.go
@@ -58,8 +58,11 @@ func TrimFilePathPrefix(s, prefix string) string {
if !HasFilePathPrefix(s, prefix) {
return s
}
- if len(s) == len(prefix) {
- return ""
+ trimmed := s[len(prefix):]
+ if len(trimmed) == 0 || trimmed[0] != filepath.Separator {
+ // Prefix either is equal to s, or ends with a separator
+ // (for example, if it is exactly "/").
+ return trimmed
}
- return s[len(prefix)+1:]
+ return trimmed[1:]
}
diff --git a/src/cmd/go/internal/str/str.go b/src/cmd/go/internal/str/str.go
index 021bfbff77..975869d760 100644
--- a/src/cmd/go/internal/str/str.go
+++ b/src/cmd/go/internal/str/str.go
@@ -30,7 +30,9 @@ func StringList(args ...any) []string {
}
// ToFold returns a string with the property that
+//
// strings.EqualFold(s, t) iff ToFold(s) == ToFold(t)
+//
// This lets us test a large set of strings for fold-equivalent
// duplicates without making a quadratic number of calls
// to EqualFold. Note that strings.ToUpper and strings.ToLower
diff --git a/src/cmd/go/internal/str/str_test.go b/src/cmd/go/internal/str/str_test.go
index 8ea758e0a8..158fe65dc1 100644
--- a/src/cmd/go/internal/str/str_test.go
+++ b/src/cmd/go/internal/str/str_test.go
@@ -5,6 +5,8 @@
package str
import (
+ "os"
+ "runtime"
"testing"
)
@@ -27,3 +29,72 @@ func TestFoldDup(t *testing.T) {
}
}
}
+
+type trimFilePathPrefixTest struct {
+ s, prefix, want string
+}
+
+func TestTrimFilePathPrefixSlash(t *testing.T) {
+ if os.PathSeparator != '/' {
+ t.Skipf("test requires slash-separated file paths")
+ }
+ tests := []trimFilePathPrefixTest{
+ {"/foo", "", "foo"},
+ {"/foo", "/", "foo"},
+ {"/foo", "/foo", ""},
+ {"/foo/bar", "/foo", "bar"},
+ {"/foo/bar", "/foo/", "bar"},
+ // if prefix is not s's prefix, return s
+ {"/foo", "/bar", "/foo"},
+ {"/foo", "/foo/bar", "/foo"},
+ }
+
+ for _, tt := range tests {
+ if got := TrimFilePathPrefix(tt.s, tt.prefix); got != tt.want {
+ t.Errorf("TrimFilePathPrefix(%q, %q) = %q, want %q", tt.s, tt.prefix, got, tt.want)
+ }
+ }
+}
+
+func TestTrimFilePathPrefixWindows(t *testing.T) {
+ if runtime.GOOS != "windows" {
+ t.Skipf("test requires Windows file paths")
+ }
+ tests := []trimFilePathPrefixTest{
+ {`C:\foo`, `C:`, `foo`},
+ {`C:\foo`, `C:\`, `foo`},
+ {`C:\foo`, `C:\foo`, ``},
+ {`C:\foo\bar`, `C:\foo`, `bar`},
+ {`C:\foo\bar`, `C:\foo\`, `bar`},
+ // if prefix is not s's prefix, return s
+ {`C:\foo`, `C:\bar`, `C:\foo`},
+ {`C:\foo`, `C:\foo\bar`, `C:\foo`},
+ // if volumes are different, return s
+ {`C:\foo`, ``, `C:\foo`},
+ {`C:\foo`, `\foo`, `C:\foo`},
+ {`C:\foo`, `D:\foo`, `C:\foo`},
+
+ //UNC path
+ {`\\host\share\foo`, `\\host\share`, `foo`},
+ {`\\host\share\foo`, `\\host\share\`, `foo`},
+ {`\\host\share\foo`, `\\host\share\foo`, ``},
+ {`\\host\share\foo\bar`, `\\host\share\foo`, `bar`},
+ {`\\host\share\foo\bar`, `\\host\share\foo\`, `bar`},
+ // if prefix is not s's prefix, return s
+ {`\\host\share\foo`, `\\host\share\bar`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\host\share\foo\bar`, `\\host\share\foo`},
+ // if either host or share name is different, return s
+ {`\\host\share\foo`, ``, `\\host\share\foo`},
+ {`\\host\share\foo`, `\foo`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\host\other\`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\other\share\`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\host\`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\share\`, `\\host\share\foo`},
+ }
+
+ for _, tt := range tests {
+ if got := TrimFilePathPrefix(tt.s, tt.prefix); got != tt.want {
+ t.Errorf("TrimFilePathPrefix(%q, %q) = %q, want %q", tt.s, tt.prefix, got, tt.want)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go
index c046caca25..f3cd0b1392 100644
--- a/src/cmd/go/internal/test/testflag.go
+++ b/src/cmd/go/internal/test/testflag.go
@@ -270,6 +270,7 @@ func (f *shuffleFlag) Set(value string) error {
// pkg.test's arguments.
// We allow known flags both before and after the package name list,
// to allow both
+//
// go test fmt -custom-flag-for-fmt-test
// go test -x math
func testFlags(args []string) (packageNames, passToTest []string) {
diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go
index 4fe4c2baed..e8b55092d8 100644
--- a/src/cmd/go/internal/tool/tool.go
+++ b/src/cmd/go/internal/tool/tool.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package tool implements the ``go tool'' command.
+// Package tool implements the “go tool” command.
package tool
import (
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
index 1c0eb5407d..5de7b83efa 100644
--- a/src/cmd/go/internal/version/version.go
+++ b/src/cmd/go/internal/version/version.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package version implements the ``go version'' command.
+// Package version implements the “go version” command.
package version
import (
diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go
index d3e0dd8116..a0b11fdd3d 100644
--- a/src/cmd/go/internal/vet/vet.go
+++ b/src/cmd/go/internal/vet/vet.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package vet implements the ``go vet'' command.
+// Package vet implements the “go vet” command.
package vet
import (
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index 42f052d341..e9a8ee6cb3 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -13,6 +13,7 @@ import (
"os"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"cmd/go/internal/base"
@@ -91,11 +92,13 @@ and test commands:
-buildmode mode
build mode to use. See 'go help buildmode' for more.
-buildvcs
- Whether to stamp binaries with version control information. By default,
- version control information is stamped into a binary if the main package
- and the main module containing it are in the repository containing the
- current directory (if there is a repository). Use -buildvcs=false to
- omit version control information.
+ Whether to stamp binaries with version control information
+ ("true", "false", or "auto"). By default ("auto"), version control
+ information is stamped into a binary if the main package, the main module
+ containing it, and the current directory are all in the same repository.
+ Use -buildvcs=false to always omit version control information, or
+ -buildvcs=true to error out if version control information is available but
+ cannot be included due to a missing tool or ambiguous directory structure.
-compiler name
name of compiler to use, as in runtime.Compiler (gccgo or gc).
-gccgoflags '[pattern=]arg list'
@@ -302,7 +305,7 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) {
cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildToolexec), "toolexec", "")
cmd.Flag.BoolVar(&cfg.BuildTrimpath, "trimpath", false, "")
cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "")
- cmd.Flag.BoolVar(&cfg.BuildBuildvcs, "buildvcs", true, "")
+ cmd.Flag.Var((*buildvcsFlag)(&cfg.BuildBuildvcs), "buildvcs", "")
// Undocumented, unstable debugging flags.
cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "")
@@ -332,6 +335,29 @@ func (v *tagsFlag) String() string {
return ""
}
+// buildvcsFlag is the implementation of the -buildvcs flag.
+type buildvcsFlag string
+
+func (f *buildvcsFlag) IsBoolFlag() bool { return true } // allow -buildvcs (without arguments)
+
+func (f *buildvcsFlag) Set(s string) error {
+ // https://go.dev/issue/51748: allow "-buildvcs=auto",
+ // in addition to the usual "true" and "false".
+ if s == "" || s == "auto" {
+ *f = "auto"
+ return nil
+ }
+
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return errors.New("value is neither 'auto' nor a valid bool")
+ }
+ *f = (buildvcsFlag)(strconv.FormatBool(b)) // convert to canonical "true" or "false"
+ return nil
+}
+
+func (f *buildvcsFlag) String() string { return string(*f) }
+
// fileExtSplit expects a filename and returns the name
// and ext (without the dot). If the file has no
// extension, ext will be empty.
@@ -379,7 +405,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) {
var b Builder
b.Init()
- pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{LoadVCS: cfg.BuildBuildvcs}, args)
+ pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{LoadVCS: true}, args)
load.CheckPackageErrors(pkgs)
explicitO := len(cfg.BuildO) > 0
@@ -532,16 +558,22 @@ See also: go build, go get, go clean.
// libname returns the filename to use for the shared library when using
// -buildmode=shared. The rules we use are:
// Use arguments for special 'meta' packages:
+//
// std --> libstd.so
// std cmd --> libstd,cmd.so
+//
// A single non-meta argument with trailing "/..." is special cased:
+//
// foo/... --> libfoo.so
// (A relative path like "./..." expands the "." first)
+//
// Use import paths for other cases, changing '/' to '-':
+//
// somelib --> libsubdir-somelib.so
// ./ or ../ --> libsubdir-somelib.so
// gopkg.in/tomb.v2 -> libgopkg.in-tomb.v2.so
// a/... b/... ---> liba/c,b/d.so - all matching import paths
+//
// Name parts are joined with ','.
func libname(args []string, pkgs []*load.Package) (string, error) {
var libname string
@@ -603,7 +635,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
modload.InitWorkfile()
BuildInit()
- pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{LoadVCS: cfg.BuildBuildvcs}, args)
+ pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{LoadVCS: true}, args)
if cfg.ModulesEnabled && !modload.HasModRoot() {
haveErrors := false
allMissingErrors := true
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 4252209f10..9c9d58b2a1 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -303,7 +303,9 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID {
fmt.Fprintf(h, "fuzz %q\n", fuzzFlags)
}
}
- fmt.Fprintf(h, "modinfo %q\n", p.Internal.BuildInfo)
+ if p.Internal.BuildInfo != "" {
+ fmt.Fprintf(h, "modinfo %q\n", p.Internal.BuildInfo)
+ }
// Configuration specific to compiler toolchain.
switch cfg.BuildToolchainName {
@@ -1882,6 +1884,7 @@ func (b *Builder) installHeader(ctx context.Context, a *Action) error {
}
// cover runs, in effect,
+//
// go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
func (b *Builder) cover(a *Action, dst, src string, varName string) error {
return b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil,
diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
index d1e2c673fa..0bf8763543 100644
--- a/src/cmd/go/internal/work/security.go
+++ b/src/cmd/go/internal/work/security.go
@@ -171,7 +171,7 @@ var validLinkerFlags = []*lazyregexp.Regexp{
// Note that any wildcards in -Wl need to exclude comma,
// since -Wl splits its argument at commas and passes
// them all to the linker uninterpreted. Allowing comma
- // in a wildcard would allow tunnelling arbitrary additional
+ // in a wildcard would allow tunneling arbitrary additional
// linker arguments through one of these.
re(`-Wl,--(no-)?allow-multiple-definition`),
re(`-Wl,--(no-)?allow-shlib-undefined`),
diff --git a/src/cmd/go/internal/workcmd/work.go b/src/cmd/go/internal/workcmd/work.go
index 39c81e8f5d..c99cc2a3fa 100644
--- a/src/cmd/go/internal/workcmd/work.go
+++ b/src/cmd/go/internal/workcmd/work.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package workcmd implements the ``go work'' command.
+// Package workcmd implements the “go work” command.
package workcmd
import (
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 76c542f32a..6254cf97c1 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -175,7 +175,7 @@ func (ts *testScript) setup() {
"GOPROXY=" + proxyURL,
"GOPRIVATE=",
"GOROOT=" + testGOROOT,
- "GOROOT_FINAL=" + os.Getenv("GOROOT_FINAL"), // causes spurious rebuilds and breaks the "stale" built-in if not propagated
+ "GOROOT_FINAL=" + testGOROOT_FINAL, // causes spurious rebuilds and breaks the "stale" built-in if not propagated
"GOTRACEBACK=system",
"TESTGO_GOROOT=" + testGOROOT,
"GOSUMDB=" + testSumDBVerifierKey,
@@ -385,6 +385,8 @@ Script:
}
}
}
+ case "mismatched-goroot":
+ ok = testGOROOT_FINAL != "" && testGOROOT_FINAL != testGOROOT
default:
if strings.HasPrefix(cond.tag, "exec:") {
prog := cond.tag[len("exec:"):]
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index 17b582d662..85e575d56e 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -90,6 +90,8 @@ should only run when the condition is satisfied. The available conditions are:
- [exec:prog] for whether prog is available for execution (found by exec.LookPath)
- [GODEBUG:value] for whether value is one of the comma-separated entries in the GODEBUG variable
- [buildmode:value] for whether -buildmode=value is supported
+ - [trimpath] for whether the 'go' binary was built with -trimpath
+ - [mismatched-goroot] for whether the test's GOROOT_FINAL does not match the real GOROOT
A condition can be negated: [!short] means to run the rest of the line
when testing.Short() is false. Multiple conditions may be given for a single
diff --git a/src/cmd/go/testdata/script/build_buildvcs_auto.txt b/src/cmd/go/testdata/script/build_buildvcs_auto.txt
new file mode 100644
index 0000000000..9eac568045
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_buildvcs_auto.txt
@@ -0,0 +1,87 @@
+# Regression test for https://go.dev/issue/51748: by default, 'go build' should
+# not attempt to stamp VCS information when the VCS tool is not present.
+
+[short] skip
+[!exec:git] skip
+
+cd sub
+exec git init .
+exec git add sub.go
+exec git commit -m 'initial state'
+cd ..
+
+exec git init
+exec git submodule add ./sub
+exec git add go.mod example.go
+exec git commit -m 'initial state'
+
+
+# Control case: with a git binary in $PATH,
+# 'go build' on a package in the same git repo
+# succeeds and stamps VCS metadata by default.
+
+go build -o example.exe .
+go version -m example.exe
+stdout '^\tbuild\tvcs=git$'
+stdout '^\tbuild\tvcs.modified=false$'
+
+
+# Building a binary from a different (nested) VCS repo should not stamp VCS
+# info. It should be an error if VCS stamps are requested explicitly with
+# '-buildvcs' (since we know the VCS metadata exists), but not an error
+# with '-buildvcs=auto'.
+
+go build -o sub.exe ./sub
+go version -m sub.exe
+! stdout '^\tbuild\tvcs'
+
+! go build -buildvcs -o sub.exe ./sub
+stderr '\Aerror obtaining VCS status: main package is in repository ".*" but current directory is in repository ".*"\n\tUse -buildvcs=false to disable VCS stamping.\n\z'
+
+cd ./sub
+go build -o sub.exe .
+go version -m sub.exe
+! stdout '^\tbuild\tvcs'
+
+! go build -buildvcs -o sub.exe .
+stderr '\Aerror obtaining VCS status: main module is in repository ".*" but current directory is in repository ".*"\n\tUse -buildvcs=false to disable VCS stamping.\n\z'
+cd ..
+
+
+# After removing 'git' from $PATH, 'go build -buildvcs' should fail...
+
+env PATH=
+env path=
+! go build -buildvcs -o example.exe .
+stderr 'go: missing Git command\. See https://golang\.org/s/gogetcmd$'
+
+# ...but by default we should omit VCS metadata when the tool is missing.
+
+go build -o example.exe .
+go version -m example.exe
+! stdout '^\tbuild\tvcs'
+
+# The default behavior can be explicitly set with '-buildvcs=auto'.
+
+go build -buildvcs=auto -o example.exe .
+go version -m example.exe
+! stdout '^\tbuild\tvcs'
+
+# Other flag values should be rejected with a useful error message.
+
+! go build -buildvcs=hg -o example.exe .
+stderr '\Ainvalid boolean value "hg" for -buildvcs: value is neither ''auto'' nor a valid bool\nusage: go build .*\nRun ''go help build'' for details.\n\z'
+
+
+-- go.mod --
+module example
+
+go 1.18
+-- example.go --
+package main
+
+func main() {}
+-- sub/sub.go --
+package main
+
+func main() {}
diff --git a/src/cmd/go/testdata/script/build_trimpath_goroot.txt b/src/cmd/go/testdata/script/build_trimpath_goroot.txt
index 7b870ab739..a26cfd23be 100644
--- a/src/cmd/go/testdata/script/build_trimpath_goroot.txt
+++ b/src/cmd/go/testdata/script/build_trimpath_goroot.txt
@@ -8,23 +8,51 @@
# TODO(#51483): when runtime.GOROOT() returns the empty string,
# go/build should default to 'go env GOROOT' instead.
-env GOROOT=
env GOROOT_FINAL=
+[trimpath] env GOROOT=
[trimpath] ! go env GOROOT
[trimpath] stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
+[trimpath] env GOROOT=$TESTGO_GOROOT
+
+[short] stop
+
+# With GOROOT still set but GOROOT_FINAL unset, 'go build' and 'go test -c'
+# should cause runtime.GOROOT() to report either the correct GOROOT
+# (without -trimpath) or no GOROOT at all (with -trimpath).
+
+go build -o example.exe .
+go build -trimpath -o example-trimpath.exe .
+go test -c -o example.test.exe .
+go test -trimpath -c -o example.test-trimpath.exe .
+
+env GOROOT=
+
+exec ./example.exe
+stdout '^GOROOT '$TESTGO_GOROOT'$'
+stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+
+! exec ./example-trimpath.exe
+stdout '^GOROOT $'
+stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)\n\z'
+
+exec ./example.test.exe -test.v
+stdout '^GOROOT '$TESTGO_GOROOT'$'
+stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+
+! exec ./example.test-trimpath.exe -test.v
+stdout '^GOROOT $'
+stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)$'
+
+# If a correct GOROOT is baked in to the 'go' command itself, 'go run' and
+# 'go test' should not implicitly set GOROOT in the process environment
+# (because that could mask an unexpected production dependency on the GOROOT
+# environment variable), but 'go generate' should (because the generator may
+# reasonably expect to be able to locate the GOROOT for which it is generating
+# code).
+
[trimpath] stop
-
-
-[short] skip
-
-go run .
-stdout '^GOROOT '$TESTGO_GOROOT'$'
-stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
-
-go test -v .
-stdout '^GOROOT '$TESTGO_GOROOT'$'
-stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+[mismatched-goroot] stop
! go run -trimpath .
stdout '^GOROOT $'
@@ -34,6 +62,11 @@ stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WO
stdout '^GOROOT $'
stdout 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)$'
+env GOFLAGS=-trimpath
+go generate .
+stdout '^GOROOT '$TESTGO_GOROOT'$'
+stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+
-- go.mod --
module example
@@ -41,6 +74,8 @@ go 1.19
-- main.go --
package main
+//go:generate go run .
+
import (
"fmt"
"go/build"
diff --git a/src/cmd/go/testdata/script/test_buildvcs.txt b/src/cmd/go/testdata/script/test_buildvcs.txt
index a0689195e8..a669966036 100644
--- a/src/cmd/go/testdata/script/test_buildvcs.txt
+++ b/src/cmd/go/testdata/script/test_buildvcs.txt
@@ -5,6 +5,8 @@
[short] skip
[!exec:git] skip
+env GOFLAGS=-buildvcs # override default -buildvcs=auto in GOFLAGS, as a user might
+
exec git init
# The test binaries should not have VCS settings stamped.
diff --git a/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt b/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
index a09e85b972..e61c4f9d04 100644
--- a/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
@@ -18,30 +18,27 @@ env GOCACHE=$WORK/gocache
exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinCache -test.fuzztime=1000x
go run check_cache/check_cache.go $GOCACHE/fuzz/FuzzMinCache
-go test -c -fuzz=. # Build using shared build cache for speed.
-env GOCACHE=$WORK/gocache
-
# Test that minimization occurs for a crash that appears while minimizing a
# newly found interesting input. There must be only one worker for this test to
# be flaky like we want.
-! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerCrashInMinimization -test.run=FuzzMinimizerCrashInMinimization -test.fuzztime=10000x -test.parallel=1
+! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerCrashInMinimization -test.run=XXX -test.fuzztime=10000x -test.parallel=1
! stdout '^ok'
stdout -count=1 'got the minimum size!'
-stdout -count=1 'flaky failure'
+stdout -count=1 'bad input'
stdout FAIL
# Check that the input written to testdata will reproduce the error, and is the
# smallest possible.
-go run check_testdata/check_testdata.go FuzzMinimizerCrashInMinimization 50
+go run check_testdata/check_testdata.go FuzzMinimizerCrashInMinimization 1
# Test that a nonrecoverable error that occurs while minimizing an interesting
# input is reported correctly.
-! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerNonrecoverableCrashInMinimization -test.run=FuzzMinimizerNonrecoverableCrashInMinimization -test.fuzztime=10000x -test.parallel=1
+! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerNonrecoverableCrashInMinimization -test.run=XXX -test.fuzztime=10000x -test.parallel=1
! stdout '^ok'
stdout -count=1 'fuzzing process hung or terminated unexpectedly while minimizing'
stdout -count=1 'EOF'
stdout FAIL
# Check that the input written to testdata will reproduce the error.
-go run check_testdata/check_testdata.go FuzzMinimizerNonrecoverableCrashInMinimization 100
+go run check_testdata/check_testdata.go FuzzMinimizerNonrecoverableCrashInMinimization 1
-- go.mod --
module fuzz
@@ -65,57 +62,34 @@ package fuzz
import (
"bytes"
- "io"
"os"
- "strings"
"testing"
- "unicode/utf8"
)
func FuzzMinimizerCrashInMinimization(f *testing.F) {
- seed := strings.Repeat("A", 1000)
+ seed := bytes.Repeat([]byte{255}, 100)
f.Add(seed)
- i := 3
- f.Fuzz(func(t *testing.T, s string) {
- if len(s) < 50 || len(s) > 1100 {
- // Make sure that b is large enough that it can be minimized
+ f.Fuzz(func(t *testing.T, b []byte) {
+ if bytes.Equal(seed, b) {
return
}
- if s != seed {
- // This should hit a new edge, and the interesting input
- // should attempt minimization
- Y(io.Discard, s)
- }
- if i > 0 {
- // Don't let it fail right away.
- i--
- } else if utf8.RuneCountInString(s) == len(s) && len(s) <= 100 {
- // Make sure this only fails if the number of bytes in the
- // marshaled string is the same as the unmarshaled string,
- // so that we can check the length of the testdata file.
- t.Error("flaky failure")
- if len(s) == 50 {
- t.Error("got the minimum size!")
- }
+ t.Error("bad input")
+ if len(b) == 1 {
+ t.Error("got the minimum size!")
}
})
}
+var fuzzing bool
+
func FuzzMinimizerNonrecoverableCrashInMinimization(f *testing.F) {
- seed := strings.Repeat("A", 1000)
+ seed := bytes.Repeat([]byte{255}, 100)
f.Add(seed)
- i := 3
- f.Fuzz(func(t *testing.T, s string) {
- if len(s) < 50 || len(s) > 1100 {
+ f.Fuzz(func(t *testing.T, b []byte) {
+ if bytes.Equal(seed, b) {
return
- }
- if s != seed {
- Y(io.Discard, s)
- }
- if i > 0 {
- i--
- } else if utf8.RuneCountInString(s) == len(s) && len(s) <= 100 {
- os.Exit(19)
+ } else if len(b) == 1 {
+ os.Exit(1)
}
})
}
@@ -138,10 +112,12 @@ func FuzzMinCache(f *testing.F) {
package main
import (
+ "bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
+ "regexp"
"strconv"
)
@@ -165,22 +141,36 @@ func main() {
os.Exit(1)
}
- fname := files[0].Name()
- contents, err := ioutil.ReadFile(filepath.Join(dir, fname))
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
+ for _, f := range files {
+ data, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
+ if err != nil {
+ panic(err)
+ }
+ var containsVal bool
+ for _, line := range bytes.Split(data, []byte("\n")) {
+ m := valRe.FindSubmatch(line)
+ if m == nil {
+ continue
+ }
+ containsVal = true
+ s, err := strconv.Unquote(string(m[1]))
+ if err != nil {
+ panic(err)
+ }
+ if len(s) != wantLen {
+ fmt.Fprintf(os.Stderr, "expect length %d, got %d (%q)\n", wantLen, len(s), line)
+ os.Exit(1)
+ }
+ }
+ if !containsVal {
+ fmt.Fprintln(os.Stderr, "corpus file contained no values")
+ os.Exit(1)
+ }
}
- contentsLen := len(contents) - len(`go test fuzz v1
-string("")
-`)
- if got, want := contentsLen, wantLen; got > want {
- fmt.Fprintf(os.Stderr, "expect length <= %d, got %d\n", want, got)
- os.Exit(1)
- }
- fmt.Fprintf(os.Stderr, "%s\n", contents)
}
+var valRe = regexp.MustCompile(`^\[\]byte\(([^)]+)\)$`)
+
-- check_cache/check_cache.go --
//go:build ignore
// +build ignore
diff --git a/src/cmd/go/testdata/script/version_buildvcs_nested.txt b/src/cmd/go/testdata/script/version_buildvcs_nested.txt
index 08d4c92baf..a0c69f9c12 100644
--- a/src/cmd/go/testdata/script/version_buildvcs_nested.txt
+++ b/src/cmd/go/testdata/script/version_buildvcs_nested.txt
@@ -1,7 +1,7 @@
[!exec:git] skip
[!exec:hg] skip
[short] skip
-env GOFLAGS=-n
+env GOFLAGS='-n -buildvcs'
# Create a root module in a root Git repository.
mkdir root
diff --git a/src/cmd/gofmt/doc.go b/src/cmd/gofmt/doc.go
index e340665594..8ac9c6a931 100644
--- a/src/cmd/gofmt/doc.go
+++ b/src/cmd/gofmt/doc.go
@@ -13,9 +13,11 @@ that directory, recursively. (Files starting with a period are ignored.)
By default, gofmt prints the reformatted sources to standard output.
Usage:
+
gofmt [flags] [path ...]
The flags are:
+
-d
Do not print reformatted sources to standard output.
If a file's formatting is different than gofmt's, print diffs
@@ -37,10 +39,10 @@ The flags are:
the original file is restored from an automatic backup.
Debugging support:
+
-cpuprofile filename
Write cpu profile to the specified file.
-
The rewrite rule specified with the -r flag must be a string of the form:
pattern -> replacement
@@ -57,7 +59,7 @@ such a fragment, gofmt preserves leading indentation as well as leading
and trailing spaces, so that individual sections of a Go program can be
formatted by piping them through gofmt.
-Examples
+# Examples
To check files for unnecessary parentheses:
@@ -71,7 +73,7 @@ To convert the package tree from explicit slice upper bounds to implicit ones:
gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src
-The simplify command
+# The simplify command
When invoked with -s gofmt will make the following source transformations where possible.
diff --git a/src/cmd/internal/bio/buf_mmap.go b/src/cmd/internal/bio/buf_mmap.go
index b9755c7e50..89ae39f736 100644
--- a/src/cmd/internal/bio/buf_mmap.go
+++ b/src/cmd/internal/bio/buf_mmap.go
@@ -18,12 +18,12 @@ import (
// because some operating systems place a limit on the number of
// distinct mapped regions per process. As of this writing:
//
-// Darwin unlimited
-// DragonFly 1000000 (vm.max_proc_mmap)
-// FreeBSD unlimited
-// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count?
-// NetBSD unlimited
-// OpenBSD unlimited
+// Darwin unlimited
+// DragonFly 1000000 (vm.max_proc_mmap)
+// FreeBSD unlimited
+// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count?
+// NetBSD unlimited
+// OpenBSD unlimited
var mmapLimit int32 = 1<<31 - 1
func init() {
diff --git a/src/cmd/internal/gcprog/gcprog.go b/src/cmd/internal/gcprog/gcprog.go
index c8bf206468..eeea53daf4 100644
--- a/src/cmd/internal/gcprog/gcprog.go
+++ b/src/cmd/internal/gcprog/gcprog.go
@@ -5,7 +5,7 @@
// Package gcprog implements an encoder for packed GC pointer bitmaps,
// known as GC programs.
//
-// Program Format
+// # Program Format
//
// The GC program encodes a sequence of 0 and 1 bits indicating scalar or pointer words in an object.
// The encoding is a simple Lempel-Ziv program, with codes to emit literal bits and to repeat the
@@ -20,7 +20,6 @@
//
// The numbers n and c, when they follow a code, are encoded as varints
// using the same encoding as encoding/binary's Uvarint.
-//
package gcprog
import (
diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go
index af2a0df338..3e36c461fa 100644
--- a/src/cmd/internal/goobj/objfile.go
+++ b/src/cmd/internal/goobj/objfile.go
@@ -264,15 +264,16 @@ func (p *ImportedPkg) Write(w *Writer) {
// Symbol definition.
//
// Serialized format:
-// Sym struct {
-// Name string
-// ABI uint16
-// Type uint8
-// Flag uint8
-// Flag2 uint8
-// Siz uint32
-// Align uint32
-// }
+//
+// Sym struct {
+// Name string
+// ABI uint16
+// Type uint8
+// Flag uint8
+// Flag2 uint8
+// Siz uint32
+// Align uint32
+// }
type Sym [SymSize]byte
const SymSize = stringRefSize + 2 + 1 + 1 + 1 + 4 + 4
@@ -371,13 +372,14 @@ const HashSize = sha1.Size
// Relocation.
//
// Serialized format:
-// Reloc struct {
-// Off int32
-// Siz uint8
-// Type uint16
-// Add int64
-// Sym SymRef
-// }
+//
+// Reloc struct {
+// Off int32
+// Siz uint8
+// Type uint16
+// Add int64
+// Sym SymRef
+// }
type Reloc [RelocSize]byte
const RelocSize = 4 + 1 + 2 + 8 + 8
@@ -415,10 +417,11 @@ func (r *Reloc) fromBytes(b []byte) { copy(r[:], b) }
// Aux symbol info.
//
// Serialized format:
-// Aux struct {
-// Type uint8
-// Sym SymRef
-// }
+//
+// Aux struct {
+// Type uint8
+// Sym SymRef
+// }
type Aux [AuxSize]byte
const AuxSize = 1 + 8
@@ -458,11 +461,12 @@ func (a *Aux) fromBytes(b []byte) { copy(a[:], b) }
// Referenced symbol flags.
//
// Serialized format:
-// RefFlags struct {
-// Sym symRef
-// Flag uint8
-// Flag2 uint8
-// }
+//
+// RefFlags struct {
+// Sym symRef
+// Flag uint8
+// Flag2 uint8
+// }
type RefFlags [RefFlagsSize]byte
const RefFlagsSize = 8 + 1 + 1
@@ -490,10 +494,11 @@ const huge = (1<<31 - 1) / RelocSize
// Referenced symbol name.
//
// Serialized format:
-// RefName struct {
-// Sym symRef
-// Name string
-// }
+//
+// RefName struct {
+// Sym symRef
+// Name string
+// }
type RefName [RefNameSize]byte
const RefNameSize = 8 + stringRefSize
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 72c4cd48ed..57d4e7a8d3 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -3977,7 +3977,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
if (o1&S64) == 0 && s >= 2 {
c.ctxt.Diag("illegal bit position\n%v", p)
}
- if ((d >> uint(s*16)) >> 16) != 0 {
+ if ((uint64(d) >> uint(s*16)) >> 16) != 0 {
c.ctxt.Diag("requires uimm16\n%v", p)
}
rt := int(p.To.Reg)
diff --git a/src/cmd/internal/obj/arm64/asm_arm64_test.go b/src/cmd/internal/obj/arm64/asm_arm64_test.go
index c6a00f5b94..f468b6b0fe 100644
--- a/src/cmd/internal/obj/arm64/asm_arm64_test.go
+++ b/src/cmd/internal/obj/arm64/asm_arm64_test.go
@@ -160,3 +160,14 @@ func TestVMOVQ(t *testing.T) {
t.Errorf("TestVMOVQ got: a=0x%x, b=0x%x, want: a=0x7040201008040201, b=0x3040201008040201", a, b)
}
}
+
+func testmovk() uint64
+
+// TestMOVK makes sure MOVK with a very large constant works. See issue 52261.
+func TestMOVK(t *testing.T) {
+ x := testmovk()
+ want := uint64(40000 << 48)
+ if x != want {
+ t.Errorf("TestMOVK got %x want %x\n", x, want)
+ }
+}
diff --git a/src/cmd/internal/obj/arm64/asm_arm64_test.s b/src/cmd/internal/obj/arm64/asm_arm64_test.s
index 9d337a4fd1..f85433c6e3 100644
--- a/src/cmd/internal/obj/arm64/asm_arm64_test.s
+++ b/src/cmd/internal/obj/arm64/asm_arm64_test.s
@@ -12,3 +12,10 @@ TEXT ·testvmovq(SB), NOSPLIT, $0-16
MOVD R0, r1+0(FP)
MOVD R1, r2+8(FP)
RET
+
+// testmovk() uint64
+TEXT ·testmovk(SB), NOSPLIT, $0-8
+ MOVD $0, R0
+ MOVK $(40000<<48), R0
+ MOVD R0, ret+0(FP)
+ RET
diff --git a/src/cmd/internal/obj/arm64/doc.go b/src/cmd/internal/obj/arm64/doc.go
index 2763cf4139..c12f618e93 100644
--- a/src/cmd/internal/obj/arm64/doc.go
+++ b/src/cmd/internal/obj/arm64/doc.go
@@ -6,24 +6,26 @@
Package arm64 implements an ARM64 assembler. Go assembly syntax is different from GNU ARM64
syntax, but we can still follow the general rules to map between them.
-Instructions mnemonics mapping rules
+# Instructions mnemonics mapping rules
1. Most instructions use width suffixes of instruction names to indicate operand width rather than
using different register names.
Examples:
- ADC R24, R14, R12 <=> adc x12, x24
- ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24
- FCMPS F2, F3 <=> fcmp s3, s2
- FCMPD F2, F3 <=> fcmp d3, d2
- FCVTDH F2, F3 <=> fcvt h3, d2
+
+ ADC R24, R14, R12 <=> adc x12, x24
+ ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24
+ FCMPS F2, F3 <=> fcmp s3, s2
+ FCMPD F2, F3 <=> fcmp d3, d2
+ FCVTDH F2, F3 <=> fcvt h3, d2
2. Go uses .P and .W suffixes to indicate post-increment and pre-increment.
Examples:
- MOVD.P -8(R10), R8 <=> ldr x8, [x10],#-8
- MOVB.W 16(R16), R10 <=> ldrsb x10, [x16,#16]!
- MOVBU.W 16(R16), R10 <=> ldrb x10, [x16,#16]!
+
+ MOVD.P -8(R10), R8 <=> ldr x8, [x10],#-8
+ MOVB.W 16(R16), R10 <=> ldrsb x10, [x16,#16]!
+ MOVBU.W 16(R16), R10 <=> ldrb x10, [x16,#16]!
3. Go uses a series of MOV instructions as load and store.
@@ -40,11 +42,12 @@ ldrsh, sturh, strh => MOVH.
instructions and floating-point(scalar) instructions.
Examples:
- VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h
- VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11
- VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s
- AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b
- SCVTFWS R3, F16 <=> scvtf s17, w6
+
+ VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h
+ VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11
+ VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s
+ AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b
+ SCVTFWS R3, F16 <=> scvtf s17, w6
6. Align directive
@@ -53,10 +56,11 @@ to a specified boundary by padding with NOOP instruction. The alignment value su
must be a power of 2 and in the range of [8, 2048].
Examples:
- PCALIGN $16
- MOVD $2, R0 // This instruction is aligned with 16 bytes.
- PCALIGN $1024
- MOVD $3, R1 // This instruction is aligned with 1024 bytes.
+
+ PCALIGN $16
+ MOVD $2, R0 // This instruction is aligned with 16 bytes.
+ PCALIGN $1024
+ MOVD $3, R1 // This instruction is aligned with 1024 bytes.
PCALIGN also changes the function alignment. If a function has one or more PCALIGN directives,
its address will be aligned to the same or coarser boundary, which is the maximum of all the
@@ -65,13 +69,14 @@ alignment values.
In the following example, the function Add is aligned with 128 bytes.
Examples:
- TEXT ·Add(SB),$40-16
- MOVD $2, R0
- PCALIGN $32
- MOVD $4, R1
- PCALIGN $128
- MOVD $8, R2
- RET
+
+ TEXT ·Add(SB),$40-16
+ MOVD $2, R0
+ PCALIGN $32
+ MOVD $4, R1
+ PCALIGN $128
+ MOVD $8, R2
+ RET
On arm64, functions in Go are aligned to 16 bytes by default, we can also use PCALGIN to set the
function alignment. The functions that need to be aligned are preferably using NOFRAME and NOSPLIT
@@ -81,11 +86,12 @@ have the same alignment as the first hand-written instruction.
In the following example, PCALIGN at the entry of the function Add will align its address to 2048 bytes.
Examples:
- TEXT ·Add(SB),NOSPLIT|NOFRAME,$0
- PCALIGN $2048
- MOVD $1, R0
- MOVD $1, R1
- RET
+
+ TEXT ·Add(SB),NOSPLIT|NOFRAME,$0
+ PCALIGN $2048
+ MOVD $1, R0
+ MOVD $1, R1
+ RET
7. Move large constants to vector registers.
@@ -93,9 +99,10 @@ Go asm uses VMOVQ/VMOVD/VMOVS to move 128-bit, 64-bit and 32-bit constants into
And for a 128-bit interger, it take two 64-bit operands, for the low and high parts separately.
Examples:
- VMOVS $0x11223344, V0
- VMOVD $0x1122334455667788, V1
- VMOVQ $0x1122334455667788, $0x99aabbccddeeff00, V2 // V2=0x99aabbccddeeff001122334455667788
+
+ VMOVS $0x11223344, V0
+ VMOVD $0x1122334455667788, V1
+ VMOVQ $0x1122334455667788, $0x99aabbccddeeff00, V2 // V2=0x99aabbccddeeff001122334455667788
8. Move an optionally-shifted 16-bit immediate value to a register.
@@ -106,9 +113,10 @@ is the 16-bit unsigned immediate, in the range 0 to 65535; For the 32-bit varian
The current Go assembler does not accept zero shifts, such as "op $0, Rd" and "op $(0<<(16|32|48)), Rd" instructions.
Examples:
- MOVK $(10<<32), R20 <=> movk x20, #10, lsl #32
- MOVZW $(20<<16), R8 <=> movz w8, #20, lsl #16
- MOVK $(0<<16), R10 will be reported as an error by the assembler.
+
+ MOVK $(10<<32), R20 <=> movk x20, #10, lsl #32
+ MOVZW $(20<<16), R8 <=> movz w8, #20, lsl #16
+ MOVK $(0<<16), R10 will be reported as an error by the assembler.
Special Cases.
@@ -123,15 +131,15 @@ related to real ARM64 instruction. NOOP serves for the hardware nop instruction.
HINT $0.
Examples:
- VMOV V13.B[1], R20 <=> mov x20, v13.b[1]
- VMOV V13.H[1], R20 <=> mov w20, v13.h[1]
- JMP (R3) <=> br x3
- CALL (R17) <=> blr x17
- LDAXRB (R19), R16 <=> ldaxrb w16, [x19]
- NOOP <=> nop
+ VMOV V13.B[1], R20 <=> mov x20, v13.b[1]
+ VMOV V13.H[1], R20 <=> mov w20, v13.h[1]
+ JMP (R3) <=> br x3
+ CALL (R17) <=> blr x17
+ LDAXRB (R19), R16 <=> ldaxrb w16, [x19]
+ NOOP <=> nop
-Register mapping rules
+# Register mapping rules
1. All basic register names are written as Rn.
@@ -140,16 +148,16 @@ Register mapping rules
3. Bn, Hn, Dn, Sn and Qn registers are written as Fn in floating-point instructions and as Vn
in SIMD instructions.
-
-Argument mapping rules
+# Argument mapping rules
1. The operands appear in left-to-right assignment order.
Go reverses the arguments of most instructions.
Examples:
- ADD R11.SXTB<<1, RSP, R25 <=> add x25, sp, w11, sxtb #1
- VADD V16, V19, V14 <=> add d14, d19, d16
+
+ ADD R11.SXTB<<1, RSP, R25 <=> add x25, sp, w11, sxtb #1
+ VADD V16, V19, V14 <=> add d14, d19, d16
Special Cases.
@@ -157,70 +165,79 @@ Special Cases.
such as str, stur, strb, sturb, strh, sturh, stlr, stlrb, stlrh, st1.
Examples:
- MOVD R29, 384(R19) <=> str x29, [x19,#384]
- MOVB.P R30, 30(R4) <=> strb w30, [x4],#30
- STLRH R21, (R19) <=> stlrh w21, [x19]
+
+ MOVD R29, 384(R19) <=> str x29, [x19,#384]
+ MOVB.P R30, 30(R4) <=> strb w30, [x4],#30
+ STLRH R21, (R19) <=> stlrh w21, [x19]
(2) MADD, MADDW, MSUB, MSUBW, SMADDL, SMSUBL, UMADDL, UMSUBL <Rm>, <Ra>, <Rn>, <Rd>
Examples:
- MADD R2, R30, R22, R6 <=> madd x6, x22, x2, x30
- SMSUBL R10, R3, R17, R27 <=> smsubl x27, w17, w10, x3
+
+ MADD R2, R30, R22, R6 <=> madd x6, x22, x2, x30
+ SMSUBL R10, R3, R17, R27 <=> smsubl x27, w17, w10, x3
(3) FMADDD, FMADDS, FMSUBD, FMSUBS, FNMADDD, FNMADDS, FNMSUBD, FNMSUBS <Fm>, <Fa>, <Fn>, <Fd>
Examples:
- FMADDD F30, F20, F3, F29 <=> fmadd d29, d3, d30, d20
- FNMSUBS F7, F25, F7, F22 <=> fnmsub s22, s7, s7, s25
+
+ FMADDD F30, F20, F3, F29 <=> fmadd d29, d3, d30, d20
+ FNMSUBS F7, F25, F7, F22 <=> fnmsub s22, s7, s7, s25
(4) BFI, BFXIL, SBFIZ, SBFX, UBFIZ, UBFX $<lsb>, <Rn>, $<width>, <Rd>
Examples:
- BFIW $16, R20, $6, R0 <=> bfi w0, w20, #16, #6
- UBFIZ $34, R26, $5, R20 <=> ubfiz x20, x26, #34, #5
+
+ BFIW $16, R20, $6, R0 <=> bfi w0, w20, #16, #6
+ UBFIZ $34, R26, $5, R20 <=> ubfiz x20, x26, #34, #5
(5) FCCMPD, FCCMPS, FCCMPED, FCCMPES <cond>, <Fm>, <Fn>, $<nzcv>
Examples:
- FCCMPD AL, F8, F26, $0 <=> fccmp d26, d8, #0x0, al
- FCCMPS VS, F29, F4, $4 <=> fccmp s4, s29, #0x4, vs
- FCCMPED LE, F20, F5, $13 <=> fccmpe d5, d20, #0xd, le
- FCCMPES NE, F26, F10, $0 <=> fccmpe s10, s26, #0x0, ne
+
+ FCCMPD AL, F8, F26, $0 <=> fccmp d26, d8, #0x0, al
+ FCCMPS VS, F29, F4, $4 <=> fccmp s4, s29, #0x4, vs
+ FCCMPED LE, F20, F5, $13 <=> fccmpe d5, d20, #0xd, le
+ FCCMPES NE, F26, F10, $0 <=> fccmpe s10, s26, #0x0, ne
(6) CCMN, CCMNW, CCMP, CCMPW <cond>, <Rn>, $<imm>, $<nzcv>
Examples:
- CCMP MI, R22, $12, $13 <=> ccmp x22, #0xc, #0xd, mi
- CCMNW AL, R1, $11, $8 <=> ccmn w1, #0xb, #0x8, al
+
+ CCMP MI, R22, $12, $13 <=> ccmp x22, #0xc, #0xd, mi
+ CCMNW AL, R1, $11, $8 <=> ccmn w1, #0xb, #0x8, al
(7) CCMN, CCMNW, CCMP, CCMPW <cond>, <Rn>, <Rm>, $<nzcv>
Examples:
- CCMN VS, R13, R22, $10 <=> ccmn x13, x22, #0xa, vs
- CCMPW HS, R19, R14, $11 <=> ccmp w19, w14, #0xb, cs
+
+ CCMN VS, R13, R22, $10 <=> ccmn x13, x22, #0xa, vs
+ CCMPW HS, R19, R14, $11 <=> ccmp w19, w14, #0xb, cs
(9) CSEL, CSELW, CSNEG, CSNEGW, CSINC, CSINCW <cond>, <Rn>, <Rm>, <Rd>;
FCSELD, FCSELS <cond>, <Fn>, <Fm>, <Fd>
Examples:
- CSEL GT, R0, R19, R1 <=> csel x1, x0, x19, gt
- CSNEGW GT, R7, R17, R8 <=> csneg w8, w7, w17, gt
- FCSELD EQ, F15, F18, F16 <=> fcsel d16, d15, d18, eq
+
+ CSEL GT, R0, R19, R1 <=> csel x1, x0, x19, gt
+ CSNEGW GT, R7, R17, R8 <=> csneg w8, w7, w17, gt
+ FCSELD EQ, F15, F18, F16 <=> fcsel d16, d15, d18, eq
(10) TBNZ, TBZ $,