diff --git a/api/except.txt b/api/except.txt index 9f7f3fe934..a911783c6b 100644 --- a/api/except.txt +++ b/api/except.txt @@ -6,6 +6,8 @@ pkg os, const ModeType = 2399141888 pkg os, const ModeType = 2399666176 pkg os (linux-arm), const O_SYNC = 4096 pkg os (linux-arm-cgo), const O_SYNC = 4096 +pkg os (linux-arm), const O_SYNC = 1052672 +pkg os (linux-arm-cgo), const O_SYNC = 1052672 pkg syscall (darwin-386), const ImplementsGetwd = false pkg syscall (darwin-386), func Fchflags(string, int) error pkg syscall (darwin-386-cgo), const ImplementsGetwd = false @@ -381,3 +383,101 @@ pkg syscall (windows-amd64), type CertRevocationInfo struct, CrlInfo uintptr pkg syscall (windows-amd64), type CertRevocationInfo struct, OidSpecificInfo uintptr pkg syscall (windows-amd64), type CertSimpleChain struct, TrustListInfo uintptr pkg syscall (windows-amd64), type RawSockaddrAny struct, Pad [96]int8 +pkg syscall (freebsd-386), func Mknod(string, uint32, int) error +pkg syscall (freebsd-386), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-386), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-386), type Stat_t struct, Atimespec Timespec +pkg syscall (freebsd-386), type Stat_t struct, Birthtimespec Timespec +pkg syscall (freebsd-386), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-386), type Stat_t struct, Ctimespec Timespec +pkg syscall (freebsd-386), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-386), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-386), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-386), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-386), type Stat_t struct, Mtimespec Timespec +pkg syscall (freebsd-386), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-386), type Stat_t struct, Pad_cgo_0 [8]uint8 +pkg syscall (freebsd-386), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-386), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-386), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-386-cgo), func Mknod(string, uint32, int) error +pkg syscall (freebsd-386-cgo), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-386-cgo), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Atimespec Timespec +pkg syscall (freebsd-386-cgo), type Stat_t struct, Birthtimespec Timespec +pkg syscall (freebsd-386-cgo), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Ctimespec Timespec +pkg syscall (freebsd-386-cgo), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Mtimespec Timespec +pkg syscall (freebsd-386-cgo), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Pad_cgo_0 [8]uint8 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-amd64), func Mknod(string, uint32, int) error +pkg syscall (freebsd-amd64), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-amd64), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-amd64), type Stat_t struct, Atimespec Timespec +pkg syscall (freebsd-amd64), type Stat_t struct, Birthtimespec Timespec +pkg syscall (freebsd-amd64), type Stat_t struct, Blksize uint32 +pkg 
syscall (freebsd-amd64), type Stat_t struct, Ctimespec Timespec +pkg syscall (freebsd-amd64), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-amd64), type Stat_t struct, Mtimespec Timespec +pkg syscall (freebsd-amd64), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-amd64), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-amd64), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-amd64), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-amd64-cgo), func Mknod(string, uint32, int) error +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Atimespec Timespec +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Birthtimespec Timespec +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ctimespec Timespec +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Mtimespec Timespec +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-arm), func Mknod(string, uint32, int) error +pkg syscall (freebsd-arm), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-arm), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-arm), type Stat_t struct, Atimespec Timespec +pkg syscall (freebsd-arm), type Stat_t struct, Birthtimespec Timespec +pkg syscall (freebsd-arm), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Ctimespec Timespec +pkg syscall (freebsd-arm), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-arm), type Stat_t struct, Mtimespec Timespec +pkg syscall (freebsd-arm), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-arm), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-arm), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-arm), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-arm-cgo), func Mknod(string, uint32, int) error +pkg syscall (freebsd-arm-cgo), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-arm-cgo), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Atimespec Timespec +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Birthtimespec Timespec +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ctimespec Timespec +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Lspare int32 +pkg syscall 
(freebsd-arm-cgo), type Stat_t struct, Mtimespec Timespec +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [88]int8 diff --git a/doc/docs.html b/doc/docs.html index 955eb3044e..8f79d3a770 100644 --- a/doc/docs.html +++ b/doc/docs.html @@ -50,10 +50,10 @@ learned. You can {{if not $.GoogleCN}}take the tour online or{{end}} install it locally with:
-$ go get golang.org/x/tour/gotour +$ go get golang.org/x/tour
-This will place the gotour binary in your workspace's bin directory.
+This will place the tour binary in your workspace's bin directory.
-Note that these are only a few top solutions; a more comphensive +Note that these are only a few top solutions; a more comprehensive community-maintained list of IDEs and text editor plugins is available at the Wiki. diff --git a/doc/effective_go.html b/doc/effective_go.html index 89c1d08782..5d184b76a9 100644 --- a/doc/effective_go.html +++ b/doc/effective_go.html @@ -1402,11 +1402,11 @@ the moment, the following snippet would also read the first 32 bytes of the buff var err error for i := 0; i < 32; i++ { nbytes, e := f.Read(buf[i:i+1]) // Read one byte. + n += nbytes if nbytes == 0 || e != nil { err = e break } - n += nbytes }
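For reference, here is the snippet as it reads after this fix, with the byte counted before the end-of-file/error check (a minimal sketch; f is assumed to be an open file or other io.Reader, and n and err are used by the surrounding code):

    var n int
    var err error
    buf := make([]byte, 32)
    for i := 0; i < 32; i++ {
        nbytes, e := f.Read(buf[i : i+1]) // Read one byte.
        n += nbytes                       // Count the byte even when Read also reports an error.
        if nbytes == 0 || e != nil {
            err = e
            break
        }
    }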
@@ -2762,7 +2762,7 @@ type Job struct { }
-The Job type now has the Log, Logf
+The Job type now has the Print, Printf, Println
and other
methods of *log.Logger. We could have given the Logger
a field name, of course, but it's not necessary to do so. And now, once
@@ -2770,7 +2770,7 @@ initialized, we can
log to the Job:
-job.Log("starting now...")
+job.Println("starting now...")
The Logger is a regular field of the Job struct,
@@ -2797,8 +2797,8 @@ we would write job.Logger,
which would be useful if we wanted to refine the methods of Logger.
-func (job *Job) Logf(format string, args ...interface{}) {
- job.Logger.Logf("%q: %s", job.Command, fmt.Sprintf(format, args...))
+func (job *Job) Printf(format string, args ...interface{}) {
+ job.Logger.Printf("%q: %s", job.Command, fmt.Sprintf(format, args...))
}
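Put together, the embedding described here looks roughly like this (a self-contained sketch built from the Effective Go example; it assumes the fmt, log, and os packages):

    type Job struct {
        Command string
        *log.Logger // embedded: Job now has Print, Printf, Println, ...
    }

    func NewJob(command string, logger *log.Logger) *Job {
        return &Job{command, logger}
    }

    // Printf wraps the embedded method to prefix each message with the command.
    func (job *Job) Printf(format string, args ...interface{}) {
        job.Logger.Printf("%q: %s", job.Command, fmt.Sprintf(format, args...))
    }

    func main() {
        job := NewJob("sleep 60", log.New(os.Stderr, "Job: ", log.Ldate))
        job.Println("starting now...") // via the embedded *log.Logger
    }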
diff --git a/doc/go_spec.html b/doc/go_spec.html
index 32336e86f8..cc2bada913 100644
--- a/doc/go_spec.html
+++ b/doc/go_spec.html
@@ -1,6 +1,6 @@
@@ -811,7 +811,7 @@ To avoid portability issues all numeric types are defined
types and thus distinct except
byte, which is an alias for uint8, and
rune, which is an alias for int32.
-Conversions
+Explicit conversions
are required when different numeric types are mixed in an expression
or assignment. For instance, int32 and int
are not the same type even though they may have the same size on a particular architecture.
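A minimal illustration of the rule:

    var i int = 42
    j := int32(i)   // int32(i), not just i: int and int32 are distinct types
    f := float64(j) // likewise between integer and floating-point types
    _ = f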
@@ -1348,7 +1348,7 @@ ChannelType = ( "chan" | "chan" "<-" | "<-" "chan" ) ElementType .
The optional <- operator specifies the channel direction,
send or receive. If no direction is given, the channel is
bidirectional.
-A channel may be constrained only to send or only to receive by
+A channel may be constrained only to send or only to receive by explicit
conversion or assignment.
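For example (a small sketch; the buffer keeps the send from blocking):

    ch := make(chan int, 1)
    var send chan<- int = ch // assignment constrains ch to a send-only view
    var recv <-chan int = ch // ... or to a receive-only view
    send <- 1
    fmt.Println(<-recv) // 1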
-if it is an untyped boolean value, it is first converted to type bool.
+if it is an untyped boolean value, it is first implicitly converted to type bool.
The predeclared value nil cannot be used to initialize a variable
with no explicit type.
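For example:

    var p *int = nil // legal: the type is explicit
    q := (*int)(nil) // legal: the conversion supplies the type
    // var v = nil   // illegal: nil has no default type
    _, _ = p, q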
@@ -2202,11 +2202,11 @@ Receiver = Parameters .
The receiver is specified via an extra parameter section preceding the method
name. That parameter section must declare a single non-variadic parameter, the receiver.
-Its type must be of the form T or *T (possibly using
-parentheses) where T is a type name. The type denoted by T is called
-the receiver base type; it must not be a pointer or interface type and
-it must be defined in the same package as the method.
-The method is said to be bound to the base type and the method name
+Its type must be a defined type T or a
+pointer to a defined type T. T is called the receiver
+base type. A receiver base type cannot be a pointer or interface type and
+it must be defined in the same package as the method.
+The method is said to be bound to its receiver base type and the method name
is visible only within selectors for type T
or *T.
-Given type Point, the declarations
+Given defined type Point, the declarations
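The declarations the spec goes on to show are along these lines (a sketch of the published example; assumes type Point struct{ x, y float64 } and the math package):

    func (p *Point) Length() float64 {
        return math.Sqrt(p.x*p.x + p.y*p.y)
    }

    func (p *Point) Scale(factor float64) {
        p.x *= factor
        p.y *= factor
    }

    // Both methods have receiver type *Point and are bound to the
    // receiver base type Point.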
@@ -3260,7 +3260,7 @@ var v, ok T1 = x.(T) yields an additional untyped boolean value. The value of ok is true if the assertion holds. Otherwise it is false and the value of v is the zero value for type T. -No run-time panic occurs in this case. +No run-time panic occurs in this case. @@ -3433,7 +3433,7 @@ For operations involving constants only, see the section on constant expressions. Except for shift operations, if one operand is an untyped constant -and the other operand is not, the constant is converted +and the other operand is not, the constant is implicitly converted to the type of the other operand.
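A short illustration of the comma-ok assertion form described above:

    var x interface{} = "hello"
    s, ok := x.(string) // ok == true, s == "hello"
    n, ok2 := x.(int)   // ok2 == false, n == 0 (zero value of int); no panic
    _, _, _, _ = s, ok, n, ok2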
@@ -3442,7 +3442,7 @@ The right operand in a shift expression must have unsigned integer type or be an untyped constant representable by a value of type uint. If the left operand of a non-constant shift expression is an untyped constant, -it is first converted to the type it would assume if the shift expression were +it is first implicitly converted to the type it would assume if the shift expression were replaced by its left operand alone. @@ -3624,7 +3624,7 @@ For signed integers, the operations +, -, *, /, and << may legally overflow and the resulting value exists and is deterministically defined by the signed integer representation, the operation, and its operands. -No exception is raised as a result of overflow. +Overflow does not cause a run-time panic. A compiler may not optimize code under the assumption that overflow does not occur. For instance, it may not assume that x < x + 1 is always true. @@ -3645,7 +3645,7 @@ occurs is implementation-specific. An implementation may combine multiple floating-point operations into a single fused operation, possibly across statements, and produce a result that differs from the value obtained by executing and rounding the instructions individually. -A floating-point type conversion explicitly rounds to +An explicit floating-point type conversion rounds to the precision of the target type, preventing fusion that would discard that rounding. @@ -3907,7 +3914,14 @@ channel is closed and empty. Conversions
-Conversions are expressions of the form
+A conversion changes the type of an expression +to the type specified by the conversion. +A conversion may appear literally in the source, or it may be implied +by the context in which an expression appears. + +An explicit conversion is an expression of the form T(x) where T is a type and x is an expression that can be converted to type T.
@@ -3938,7 +3945,7 @@ func() int(x) // x is converted to func() int (unambiguous) A constant value x can be converted to type T if x is representable by a value of T. -As a special case, an integer constant x can be converted to a +As a special case, an integer constant x can be explicitly converted to a string type using the same rule as for non-constant x. @@ -4672,13 +4679,13 @@ to the type of the operand to which it is assigned, with the following special cases:
-If an untyped boolean value is assigned to a variable of interface type or the blank identifier, it is first converted to type bool.
+If an untyped boolean value is assigned to a variable of interface type or the blank identifier, it is first implicitly converted to type bool.
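A few concrete instances of the conversion rules touched above (a minimal sketch):

    i := 65
    f := float64(i)           // explicit conversion between numeric types
    s := string(rune(i))      // "A": an integer value converted to a string type
    var b interface{} = 1 < 2 // untyped boolean implicitly converted to bool on assignment
    _, _, _ = f, s, b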
-If the switch expression evaluates to an untyped constant, it is first
+If the switch expression evaluates to an untyped constant, it is first implicitly
converted to its default type;
-if it is an untyped boolean value, it is first converted to type bool.
+if it is an untyped boolean value, it is first implicitly converted to type bool.
The predeclared untyped value nil cannot be used as a switch expression.
-If a case expression is untyped, it is first converted
+If a case expression is untyped, it is first implicitly converted
to the type of the switch expression.
For each (possibly converted) case expression x and the value t
of the switch expression, x == t must be a valid comparison.
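For example:

    var s int8 = 2
    switch s {
    case 1, 2: // the untyped constants 1 and 2 are implicitly converted to int8
        fmt.Println("one or two")
    }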
@@ -5881,7 +5888,7 @@ floating-point type and the return type is the complex type
with the corresponding floating-point constituents:
complex64 for float32 arguments, and
complex128 for float64 arguments.
-If one of the arguments evaluates to an untyped constant, it is first
+If one of the arguments evaluates to an untyped constant, it is first implicitly
converted to the type of the other argument.
If both arguments evaluate to untyped constants, they must be non-complex
numbers or their imaginary parts must be zero, and the return value of the function is an untyped complex constant.
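A short illustration (assuming the rules above):

    c1 := complex(1, 2) // both arguments untyped: an untyped complex constant (default type complex128)
    var f float32 = 1
    c2 := complex(f, 2) // 2 is implicitly converted to float32; c2 has type complex64
    _, _ = c1, c2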
diff --git a/doc/help.html b/doc/help.html
index f668196871..f11e286904 100644
--- a/doc/help.html
+++ b/doc/help.html
@@ -27,6 +27,11 @@ The Go Forum is a discussion
forum for Go programmers.
+Get live support and talk with other gophers on the Go Discord. +
+Get live support from other users in the Go Slack channel.
diff --git a/misc/cgo/test/callback.go b/misc/cgo/test/callback.go index b88bf134bc..58e126b41b 100644 --- a/misc/cgo/test/callback.go +++ b/misc/cgo/test/callback.go @@ -295,7 +295,7 @@ func goWithString(s string) { } func testCallbackStack(t *testing.T) { - // Make cgo call and callback with different amount of stack stack available. + // Make cgo call and callback with different amount of stack available. // We do not do any explicit checks, just ensure that it does not crash. for _, f := range splitTests { f() diff --git a/misc/cgo/testplugin/unnamed1/main.go b/misc/cgo/testplugin/unnamed1/main.go index 5c1df086d7..caf09c9e89 100644 --- a/misc/cgo/testplugin/unnamed1/main.go +++ b/misc/cgo/testplugin/unnamed1/main.go @@ -9,7 +9,7 @@ import "C" func FuncInt() int { return 1 } -// Add a recursive type to to check that type equality across plugins doesn't +// Add a recursive type to check that type equality across plugins doesn't // crash. See https://golang.org/issues/19258 func FuncRecursive() X { return X{} } diff --git a/misc/cgo/testsanitizers/cc_test.go b/misc/cgo/testsanitizers/cc_test.go index f09ad52cee..218e225429 100644 --- a/misc/cgo/testsanitizers/cc_test.go +++ b/misc/cgo/testsanitizers/cc_test.go @@ -374,7 +374,7 @@ func (c *config) checkRuntime() (skip bool, err error) { } // libcgo.h sets CGO_TSAN if it detects TSAN support in the C compiler. - // Dump the preprocessor defines to check that that works. + // Dump the preprocessor defines to check that works. // (Sometimes it doesn't: see https://golang.org/issue/15983.) cmd, err := cc(c.cFlags...) if err != nil { diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go index 529a2c692f..c3c7a6aab6 100644 --- a/misc/cgo/testshared/shared_test.go +++ b/misc/cgo/testshared/shared_test.go @@ -578,7 +578,7 @@ func TestNotes(t *testing.T) { } // Build a GOPATH package (depBase) into a shared library that links against the goroot -// runtime, another package (dep2) that links against the first, and and an +// runtime, another package (dep2) that links against the first, and an // executable that links against dep2. func TestTwoGopathShlibs(t *testing.T) { goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase") diff --git a/misc/nacl/README b/misc/nacl/README index 99b94dc90a..179e526d89 100644 --- a/misc/nacl/README +++ b/misc/nacl/README @@ -26,7 +26,7 @@ scheme. # Download NaCl Download nacl_sdk.zip file from - https://developers.google.com/native-client/dev/sdk/download + https://developer.chrome.com/native-client/sdk/download and unpack it. I chose /opt/nacl_sdk. # Update @@ -37,7 +37,7 @@ sdk. These are released every 6-8 weeks, in line with Chrome releases. % cd /opt/nacl_sdk % ./naclsdk update -At this time pepper_40 is the stable version. The NaCl port needs at least pepper_39 +At this time pepper_49 is the stable version. The NaCl port needs at least pepper_39 to work. If naclsdk downloads a later version, please adjust accordingly. The cmd/go helper scripts expect that the loaders sel_ldr_{x86_{32,64},arm} and diff --git a/misc/nacl/testzip.proto b/misc/nacl/testzip.proto index 1e9279e4e0..7f524cac48 100644 --- a/misc/nacl/testzip.proto +++ b/misc/nacl/testzip.proto @@ -37,6 +37,9 @@ go src=.. buildid testdata + + xcoff + testdata + + gofmt gofmt.go gofmt_test.go @@ -151,6 +154,9 @@ go src=.. 
trace testdata + + traceparser + testdata + + io + mime diff --git a/misc/wasm/wasm_exec.js b/misc/wasm/wasm_exec.js index 94b9552c59..78eb306253 100644 --- a/misc/wasm/wasm_exec.js +++ b/misc/wasm/wasm_exec.js @@ -47,10 +47,20 @@ } return buf.length; }, - openSync(path, flags, mode) { + write(fd, buf, offset, length, position, callback) { + if (offset !== 0 || length !== buf.length || position !== null) { + throw new Error("not implemented"); + } + const n = this.writeSync(fd, buf); + callback(null, n); + }, + open(path, flags, mode, callback) { const err = new Error("not implemented"); err.code = "ENOSYS"; - throw err; + callback(err); + }, + fsync(fd, callback) { + callback(null); }, }; } @@ -88,6 +98,9 @@ const loadValue = (addr) => { const f = mem().getFloat64(addr, true); + if (f === 0) { + return undefined; + } if (!isNaN(f)) { return f; } @@ -105,14 +118,18 @@ mem().setUint32(addr, 0, true); return; } + if (v === 0) { + mem().setUint32(addr + 4, nanHead, true); + mem().setUint32(addr, 1, true); + return; + } mem().setFloat64(addr, v, true); return; } switch (v) { case undefined: - mem().setUint32(addr + 4, nanHead, true); - mem().setUint32(addr, 1, true); + mem().setFloat64(addr, 0, true); return; case null: mem().setUint32(addr + 4, nanHead, true); @@ -327,7 +344,7 @@ this._inst = instance; this._values = [ // TODO: garbage collection NaN, - undefined, + 0, null, true, false, @@ -389,14 +406,14 @@ } static _makeCallbackHelper(id, pendingCallbacks, go) { - return function() { + return function () { pendingCallbacks.push({ id: id, args: arguments }); go._resolveCallbackPromise(); }; } static _makeEventCallbackHelper(preventDefault, stopPropagation, stopImmediatePropagation, fn) { - return function(event) { + return function (event) { if (preventDefault) { event.preventDefault(); } diff --git a/src/archive/tar/stat_actime1.go b/src/archive/tar/stat_actime1.go index cf9cc79c59..eb82edb6d9 100644 --- a/src/archive/tar/stat_actime1.go +++ b/src/archive/tar/stat_actime1.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux dragonfly openbsd solaris +// +build linux dragonfly freebsd openbsd solaris package tar diff --git a/src/archive/tar/stat_actime2.go b/src/archive/tar/stat_actime2.go index 6f17dbe307..f707012714 100644 --- a/src/archive/tar/stat_actime2.go +++ b/src/archive/tar/stat_actime2.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin freebsd netbsd +// +build darwin netbsd package tar diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go index 5f0c0a1a55..cdc534eaf0 100644 --- a/src/archive/zip/writer.go +++ b/src/archive/zip/writer.go @@ -178,7 +178,7 @@ func (w *Writer) Close() error { return err } - // store max values in the regular end record to signal that + // store max values in the regular end record to signal // that the zip64 values should be used instead records = uint16max size = uint32max diff --git a/src/bytes/compare_test.go b/src/bytes/compare_test.go index 3e33c27c9c..a321f2e086 100644 --- a/src/bytes/compare_test.go +++ b/src/bytes/compare_test.go @@ -41,9 +41,16 @@ var compareTests = []struct { func TestCompare(t *testing.T) { for _, tt := range compareTests { - cmp := Compare(tt.a, tt.b) - if cmp != tt.i { - t.Errorf(`Compare(%q, %q) = %v`, tt.a, tt.b, cmp) + numShifts := 16 + buffer := make([]byte, len(tt.b)+numShifts) + // vary the input alignment of tt.b + for offset := 0; offset <= numShifts; offset++ { + shiftedB := buffer[offset : len(tt.b)+offset] + copy(shiftedB, tt.b) + cmp := Compare(tt.a, shiftedB) + if cmp != tt.i { + t.Errorf(`Compare(%q, %q), offset %d = %v; want %v`, tt.a, tt.b, offset, cmp, tt.i) + } } } } diff --git a/src/cmd/api/goapi.go b/src/cmd/api/goapi.go index 8cc78c01ed..9698f25b51 100644 --- a/src/cmd/api/goapi.go +++ b/src/cmd/api/goapi.go @@ -385,9 +385,7 @@ func (w *Walker) parseFile(dir, file string) (*ast.File, error) { return f, nil } -// The package cache doesn't operate correctly in rare (so far artificial) -// circumstances (issue 8425). Disable before debugging non-obvious errors -// from the type-checker. +// Disable before debugging non-obvious errors from the type-checker. const usePkgCache = true var ( @@ -398,7 +396,7 @@ var ( // tagKey returns the tag-based key to use in the pkgCache. // It is a comma-separated string; the first part is dir, the rest tags. // The satisfied tags are derived from context but only those that -// matter (the ones listed in the tags argument) are used. +// matter (the ones listed in the tags argument plus GOOS and GOARCH) are used. // The tags list, which came from go/build's Package.AllTags, // is known to be sorted. func tagKey(dir string, context *build.Context, tags []string) string { @@ -414,9 +412,17 @@ func tagKey(dir string, context *build.Context, tags []string) string { } // TODO: ReleaseTags (need to load default) key := dir + + // explicit on GOOS and GOARCH as global cache will use "all" cached packages for + // an indirect imported package. See https://github.com/golang/go/issues/21181 + // for more detail. 
+ tags = append(tags, context.GOOS, context.GOARCH) + sort.Strings(tags) + for _, tag := range tags { if ctags[tag] { key += "," + tag + ctags[tag] = false } } return key diff --git a/src/cmd/api/goapi_test.go b/src/cmd/api/goapi_test.go index 3c4e50a21a..1c8e2a345b 100644 --- a/src/cmd/api/goapi_test.go +++ b/src/cmd/api/goapi_test.go @@ -188,3 +188,18 @@ func BenchmarkAll(b *testing.B) { } } } + +func TestIssue21181(t *testing.T) { + for _, c := range contexts { + c.Compiler = build.Default.Compiler + } + for _, context := range contexts { + w := NewWalker(context, "testdata/src/issue21181") + pkg, err := w.Import("p") + if err != nil { + t.Fatalf("%s: (%s-%s) %s %v", err, context.GOOS, context.GOARCH, + pkg.Name(), w.imported) + } + w.export(pkg) + } +} diff --git a/src/cmd/api/testdata/src/issue21181/dep/p.go b/src/cmd/api/testdata/src/issue21181/dep/p.go new file mode 100644 index 0000000000..2d8e0c4cce --- /dev/null +++ b/src/cmd/api/testdata/src/issue21181/dep/p.go @@ -0,0 +1,5 @@ +package dep + +type Interface interface { + N([]byte) +} diff --git a/src/cmd/api/testdata/src/issue21181/dep/p_amd64.go b/src/cmd/api/testdata/src/issue21181/dep/p_amd64.go new file mode 100644 index 0000000000..8a2343a0e2 --- /dev/null +++ b/src/cmd/api/testdata/src/issue21181/dep/p_amd64.go @@ -0,0 +1 @@ +package dep diff --git a/src/cmd/api/testdata/src/issue21181/indirect/p.go b/src/cmd/api/testdata/src/issue21181/indirect/p.go new file mode 100644 index 0000000000..e37cf3fc44 --- /dev/null +++ b/src/cmd/api/testdata/src/issue21181/indirect/p.go @@ -0,0 +1,5 @@ +package indirect + +import "dep" + +func F(dep.Interface) {} diff --git a/src/cmd/api/testdata/src/issue21181/p/p.go b/src/cmd/api/testdata/src/issue21181/p/p.go new file mode 100644 index 0000000000..a704160edc --- /dev/null +++ b/src/cmd/api/testdata/src/issue21181/p/p.go @@ -0,0 +1,9 @@ +package p + +import ( + "dep" +) + +type algo struct { + indrt func(dep.Interface) +} diff --git a/src/cmd/api/testdata/src/issue21181/p/p_amd64.go b/src/cmd/api/testdata/src/issue21181/p/p_amd64.go new file mode 100644 index 0000000000..02b4cbf036 --- /dev/null +++ b/src/cmd/api/testdata/src/issue21181/p/p_amd64.go @@ -0,0 +1,7 @@ +package p + +import "indirect" + +var in = []algo{ + {indirect.F}, +} diff --git a/src/cmd/api/testdata/src/issue21181/p/p_generic.go b/src/cmd/api/testdata/src/issue21181/p/p_generic.go new file mode 100644 index 0000000000..4d75809676 --- /dev/null +++ b/src/cmd/api/testdata/src/issue21181/p/p_generic.go @@ -0,0 +1,11 @@ +// +build !amd64 + +package p + +import ( + "indirect" +) + +var in = []algo{ + {indirect.F}, +} diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index 627be09d08..5da64f135a 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -308,6 +308,28 @@ func (p *Parser) asmPCData(operands [][]lex.Token) { p.append(prog, "", true) } +// asmPCAlign assembles a PCALIGN pseudo-op. +// PCALIGN $16 +func (p *Parser) asmPCAlign(operands [][]lex.Token) { + if len(operands) != 1 { + p.errorf("expect one operand for PCALIGN") + return + } + + // Operand 0 must be an immediate constant. + key := p.address(operands[0]) + if !p.validImmediate("PCALIGN", &key) { + return + } + + prog := &obj.Prog{ + Ctxt: p.ctxt, + As: obj.APCALIGN, + From: key, + } + p.append(prog, "", true) +} + // asmFuncData assembles a FUNCDATA pseudo-op. 
// FUNCDATA $1, funcdata<>+4(SB) func (p *Parser) asmFuncData(operands [][]lex.Token) { diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index 48749b7874..e77db9fba1 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -227,6 +227,8 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool { p.asmGlobl(operands) case "PCDATA": p.asmPCData(operands) + case "PCALIGN": + p.asmPCAlign(operands) case "TEXT": p.asmText(operands) default: diff --git a/src/cmd/asm/internal/asm/testdata/386enc.s b/src/cmd/asm/internal/asm/testdata/386enc.s index 15d1705c97..4af6de36d1 100644 --- a/src/cmd/asm/internal/asm/testdata/386enc.s +++ b/src/cmd/asm/internal/asm/testdata/386enc.s @@ -18,7 +18,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 MOVL -2147483648(AX), AX // 8b8000000080 ADDL 2147483648(AX), AX // 038000000080 ADDL -2147483648(AX), AX // 038000000080 - // Make sure MOV CR/DR continues to work after changing it's movtabs. + // Make sure MOV CR/DR continues to work after changing its movtabs. MOVL CR0, AX // 0f20c0 MOVL CR0, DX // 0f20c2 MOVL CR4, DI // 0f20e7 diff --git a/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s b/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s index 2f0d9ecf86..d7afecc230 100644 --- a/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s +++ b/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s @@ -302,7 +302,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 // Check that LEAL is permitted to use overflowing offset. LEAL 2400959708(BP)(R10*1), BP // 428dac15dcbc1b8f LEAL 3395469782(AX)(R10*1), AX // 428d8410d6c162ca - // Make sure MOV CR/DR continues to work after changing it's movtabs. + // Make sure MOV CR/DR continues to work after changing its movtabs. MOVQ CR0, AX // 0f20c0 MOVQ CR0, DX // 0f20c2 MOVQ CR4, DI // 0f20e7 diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 9e2e2b1dc5..12c7adbd04 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -25,6 +25,18 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 ADD R1, R2, R3 ADD R1, ZR, R3 ADD $1, R2, R3 + ADD $0x000aaa, R2, R3 // ADD $2730, R2, R3 // 43a82a91 + ADD $0x000aaa, R2 // ADD $2730, R2 // 42a82a91 + ADD $0xaaa000, R2, R3 // ADD $11182080, R2, R3 // 43a86a91 + ADD $0xaaa000, R2 // ADD $11182080, R2 // 42a86a91 + ADD $0xaaaaaa, R2, R3 // ADD $11184810, R2, R3 // 43a82a9163a86a91 + ADD $0xaaaaaa, R2 // ADD $11184810, R2 // 42a82a9142a86a91 + SUB $0x000aaa, R2, R3 // SUB $2730, R2, R3 // 43a82ad1 + SUB $0x000aaa, R2 // SUB $2730, R2 // 42a82ad1 + SUB $0xaaa000, R2, R3 // SUB $11182080, R2, R3 // 43a86ad1 + SUB $0xaaa000, R2 // SUB $11182080, R2 // 42a86ad1 + SUB $0xaaaaaa, R2, R3 // SUB $11184810, R2, R3 // 43a82ad163a86ad1 + SUB $0xaaaaaa, R2 // SUB $11184810, R2 // 42a82ad142a86ad1 ADD R1>>11, R2, R3 ADD R1<<22, R2, R3 ADD R1->33, R2, R3 @@ -179,6 +191,11 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc FMOVD F4, (R2)(R6<<3) // 447826fc + CMPW $40960, R0 // 1f284071 + CMPW $27745, R2 // 3b8c8d525f001b6b + CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b + CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b + ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b // LTYPE1 imsr ',' spreg ',' // { // outcode($1, &$2, $4, &nullgen); @@ -214,6 +231,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 ANDS $0x22220000, R3, R4 // ANDS $572653568, R3, R4 // 5b44a4d264001bea BICS $0x22220000, R3, R4 // 
BICS $572653568, R3, R4 // 5b44a4d264003bea + EOR $0xe03fffffffffffff, R20, R22 // EOR $-2287828610704211969, R20, R22 // 96e243d2 + TSTW $0x600000006, R1 // TSTW $25769803782, R1 // 3f041f72 + ANDS $0xffff, R2 // ANDS $65535, R2 // 423c40f2 + AND $0x7fffffff, R3 // AND $2147483647, R3 // 63784092 + ANDS $0x0ffffffff80000000, R2 // ANDS $-2147483648, R2 // 428061f2 + AND $0xfffff, R2 // AND $1048575, R2 // 424c4092 + ANDW $0xf00fffff, R1 // ANDW $4027580415, R1 // 215c0412 + ANDSW $0xff00ffff, R1 // ANDSW $4278255615, R1 // 215c0872 + TSTW $0xff00ff, R1 // TSTW $16711935, R1 // 3f9c0072 + AND $8, R0, RSP // 1f007d92 ORR $8, R0, RSP // 1f007db2 EOR $8, R0, RSP // 1f007dd2 @@ -221,6 +248,19 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 ORN $8, R0, RSP // 1ff87cb2 EON $8, R0, RSP // 1ff87cd2 + MOVD $0x3fffffffc000, R0 // MOVD $70368744161280, R0 // e07f72b2 + MOVW $0xaaaa0000, R1 // MOVW $2863267840, R1 // 4155b552 + MOVW $0xaaaaffff, R1 // MOVW $2863333375, R1 // a1aaaa12 + MOVW $0xaaaa, R1 // MOVW $43690, R1 // 41559552 + MOVW $0xffffaaaa, R1 // MOVW $4294945450, R1 // a1aa8a12 + MOVW $0xffff0000, R1 // MOVW $4294901760, R1 // e1ffbf52 + MOVD $0xffff00000000000, R1 // MOVD $1152903912420802560, R1 // e13f54b2 + MOVD $0x11110000, R1 // MOVD $286326784, R1 // 2122a2d2 + MOVD $0, R1 // 010080d2 + MOVD $-1, R1 // 01008092 + MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2 + MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92 + // // CLS // @@ -416,7 +456,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 CMP R22.SXTX, RSP // ffe336eb CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb - CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a4d2ff633b6b + CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b // TST TST $15, R2 // 5f0c40f2 diff --git a/src/cmd/asm/internal/asm/testdata/ppc64enc.s b/src/cmd/asm/internal/asm/testdata/ppc64enc.s index 7ab1a578f8..0133a85b98 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64enc.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64enc.s @@ -98,4 +98,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 LDAR (R4),$0,R5 // 7ca020a8 LDAR (R3),R5 // 7ca018a8 + // float constants + FMOVD $(0.0), F1 // f0210cd0 + FMOVD $(-0.0), F1 // f0210cd0fc200850 RET diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index 019ee64c8e..45bf90ffc2 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -9,6 +9,7 @@ package main import ( "bytes" + "cmd/internal/xcoff" "debug/dwarf" "debug/elf" "debug/macho" @@ -188,6 +189,7 @@ func (p *Package) Translate(f *File) { break } } + p.prepareNames(f) if p.rewriteCalls(f) { // Add `import _cgo_unsafe "unsafe"` after the package statement. f.Edit.Insert(f.offset(f.AST.Name.End()), "; import _cgo_unsafe \"unsafe\"") @@ -679,6 +681,27 @@ func (p *Package) recordTypedefs1(dtype dwarf.Type, visited map[dwarf.Type]bool) } } +// prepareNames finalizes the Kind field of not-type names and sets +// the mangled name of all names. +func (p *Package) prepareNames(f *File) { + for _, n := range f.Name { + if n.Kind == "not-type" { + if n.Define == "" { + n.Kind = "var" + } else { + n.Kind = "macro" + n.FuncType = &FuncType{ + Result: n.Type, + Go: &ast.FuncType{ + Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}}, + }, + } + } + } + p.mangleName(n) + } +} + // mangleName does name mangling to translate names // from the original Go source files to the names // used in the final Go files generated by cgo. @@ -722,16 +745,19 @@ func (p *Package) rewriteCalls(f *File) bool { // argument and then calls the original function. 
// This returns whether the package needs to import unsafe as _cgo_unsafe. func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool { + params := name.FuncType.Params + args := call.Call.Args + // Avoid a crash if the number of arguments is // less than the number of parameters. // This will be caught when the generated file is compiled. - if len(call.Call.Args) < len(name.FuncType.Params) { + if len(args) < len(params) { return false } any := false - for i, param := range name.FuncType.Params { - if p.needsPointerCheck(f, param.Go, call.Call.Args[i]) { + for i, param := range params { + if p.needsPointerCheck(f, param.Go, args[i]) { any = true break } @@ -750,127 +776,108 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool { // Using a function literal like this lets us do correct // argument type checking, and works correctly if the call is // deferred. + var sb bytes.Buffer + sb.WriteString("func(") + needsUnsafe := false - params := make([]*ast.Field, len(name.FuncType.Params)) - nargs := make([]ast.Expr, len(name.FuncType.Params)) - var stmts []ast.Stmt - for i, param := range name.FuncType.Params { - // params is going to become the parameters of the - // function literal. - // nargs is going to become the list of arguments made - // by the call within the function literal. - // nparam is the parameter of the function literal that - // corresponds to param. - origArg := call.Call.Args[i] - nparam := ast.NewIdent(fmt.Sprintf("_cgo%d", i)) - nargs[i] = nparam + for i, param := range params { + if i > 0 { + sb.WriteString(", ") + } + + fmt.Fprintf(&sb, "_cgo%d ", i) - // The Go version of the C type might use unsafe.Pointer, - // but the file might not import unsafe. - // Rewrite the Go type if necessary to use _cgo_unsafe. ptype := p.rewriteUnsafe(param.Go) if ptype != param.Go { needsUnsafe = true } + sb.WriteString(gofmtLine(ptype)) + } - params[i] = &ast.Field{ - Names: []*ast.Ident{nparam}, - Type: ptype, - } + sb.WriteString(")") - if !p.needsPointerCheck(f, param.Go, origArg) { + result := false + twoResults := false + + // Check whether this call expects two results. + for _, ref := range f.Ref { + if ref.Expr != &call.Call.Fun { continue } - - // Run the cgo pointer checks on nparam. - - // Change the function literal to call the real function - // with the parameter passed through _cgoCheckPointer. - c := &ast.CallExpr{ - Fun: ast.NewIdent("_cgoCheckPointer"), - Args: []ast.Expr{ - nparam, - }, + if ref.Context == ctxCall2 { + sb.WriteString(" (") + result = true + twoResults = true } - - // Add optional additional arguments for an address - // expression. - c.Args = p.checkAddrArgs(f, c.Args, origArg) - - stmt := &ast.ExprStmt{ - X: c, - } - stmts = append(stmts, stmt) + break } - const cgoMarker = "__cgo__###__marker__" - fcall := &ast.CallExpr{ - Fun: ast.NewIdent(cgoMarker), - Args: nargs, - } - ftype := &ast.FuncType{ - Params: &ast.FieldList{ - List: params, - }, - } + // Add the result type, if any. if name.FuncType.Result != nil { rtype := p.rewriteUnsafe(name.FuncType.Result.Go) if rtype != name.FuncType.Result.Go { needsUnsafe = true } - ftype.Results = &ast.FieldList{ - List: []*ast.Field{ - &ast.Field{ - Type: rtype, - }, - }, + if !twoResults { + sb.WriteString(" ") } + sb.WriteString(gofmtLine(rtype)) + result = true } - // If this call expects two results, we have to - // adjust the results of the function we generated. 
- for _, ref := range f.Ref { - if ref.Expr == &call.Call.Fun && ref.Context == ctxCall2 { - if ftype.Results == nil { - // An explicit void argument - // looks odd but it seems to - // be how cgo has worked historically. - ftype.Results = &ast.FieldList{ - List: []*ast.Field{ - &ast.Field{ - Type: ast.NewIdent("_Ctype_void"), - }, - }, - } - } - ftype.Results.List = append(ftype.Results.List, - &ast.Field{ - Type: ast.NewIdent("error"), - }) + // Add the second result type, if any. + if twoResults { + if name.FuncType.Result == nil { + // An explicit void result looks odd but it + // seems to be how cgo has worked historically. + sb.WriteString("_Ctype_void") } + sb.WriteString(", error)") } - var fbody ast.Stmt - if ftype.Results == nil { - fbody = &ast.ExprStmt{ - X: fcall, + sb.WriteString(" { ") + + for i, param := range params { + arg := args[i] + if !p.needsPointerCheck(f, param.Go, arg) { + continue } - } else { - fbody = &ast.ReturnStmt{ - Results: []ast.Expr{fcall}, + + // Check for &a[i]. + if p.checkIndex(&sb, f, arg, i) { + continue } + + // Check for &x. + if p.checkAddr(&sb, arg, i) { + continue + } + + fmt.Fprintf(&sb, "_cgoCheckPointer(_cgo%d); ", i) } - lit := &ast.FuncLit{ - Type: ftype, - Body: &ast.BlockStmt{ - List: append(stmts, fbody), - }, + + if result { + sb.WriteString("return ") } - text := strings.Replace(gofmt(lit), "\n", ";", -1) - repl := strings.Split(text, cgoMarker) - f.Edit.Insert(f.offset(call.Call.Fun.Pos()), repl[0]) - f.Edit.Insert(f.offset(call.Call.Fun.End()), repl[1]) + + // Now we are ready to call the C function. + // To work smoothly with rewriteRef we leave the call in place + // and just insert our new arguments between the function + // and the old arguments. + f.Edit.Insert(f.offset(call.Call.Fun.Pos()), sb.String()) + + sb.Reset() + sb.WriteString("(") + for i := range params { + if i > 0 { + sb.WriteString(", ") + } + fmt.Fprintf(&sb, "_cgo%d", i) + } + sb.WriteString("); }") + + f.Edit.Insert(f.offset(call.Call.Lparen), sb.String()) return needsUnsafe } @@ -979,19 +986,13 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool { } } -// checkAddrArgs tries to add arguments to the call of -// _cgoCheckPointer when the argument is an address expression. We -// pass true to mean that the argument is an address operation of -// something other than a slice index, which means that it's only -// necessary to check the specific element pointed to, not the entire -// object. This is for &s.f, where f is a field in a struct. We can -// pass a slice or array, meaning that we should check the entire -// slice or array but need not check any other part of the object. -// This is for &s.a[i], where we need to check all of a. However, we -// only pass the slice or array if we can refer to it without side -// effects. -func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr { +// checkIndex checks whether arg the form &a[i], possibly inside type +// conversions. If so, and if a has no side effects, it writes +// _cgoCheckPointer(_cgoNN, a) to sb and returns true. This tells +// _cgoCheckPointer to check the complete contents of the slice. +func (p *Package) checkIndex(sb *bytes.Buffer, f *File, arg ast.Expr, i int) bool { // Strip type conversions. 
+ x := arg for { c, ok := x.(*ast.CallExpr) if !ok || len(c.Args) != 1 || !p.isType(c.Fun) { @@ -1001,22 +1002,46 @@ func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr } u, ok := x.(*ast.UnaryExpr) if !ok || u.Op != token.AND { - return args + return false } index, ok := u.X.(*ast.IndexExpr) if !ok { - // This is the address of something that is not an - // index expression. We only need to examine the - // single value to which it points. - // TODO: what if true is shadowed? - return append(args, ast.NewIdent("true")) + return false } - if !p.hasSideEffects(f, index.X) { - // Examine the entire slice. - return append(args, index.X) + if p.hasSideEffects(f, index.X) { + return false } - // Treat the pointer as unknown. - return args + + fmt.Fprintf(sb, "_cgoCheckPointer(_cgo%d, %s); ", i, gofmtLine(index.X)) + + return true +} + +// checkAddr checks whether arg has the form &x, possibly inside type +// conversions. If so it writes _cgoCheckPointer(_cgoNN, true) to sb +// and returns true. This tells _cgoCheckPointer to check just the +// contents of the pointer being passed, not any other part of the +// memory allocation. This is run after checkIndex, which looks for +// the special case of &a[i], which requires different checks. +func (p *Package) checkAddr(sb *bytes.Buffer, arg ast.Expr, i int) bool { + // Strip type conversions. + px := &arg + for { + c, ok := (*px).(*ast.CallExpr) + if !ok || len(c.Args) != 1 || !p.isType(c.Fun) { + break + } + px = &c.Args[0] + } + if u, ok := (*px).(*ast.UnaryExpr); !ok || u.Op != token.AND { + return false + } + + // Use "0 == 0" to do the right thing in the unlikely event + // that "true" is shadowed. + fmt.Fprintf(sb, "_cgoCheckPointer(_cgo%d, 0 == 0); ", i) + + return true } // hasSideEffects returns whether the expression x has any side @@ -1026,8 +1051,7 @@ func (p *Package) hasSideEffects(f *File, x ast.Expr) bool { found := false f.walk(x, ctxExpr, func(f *File, x interface{}, context astContext) { - switch x.(type) { - case *ast.CallExpr: + if _, ok := x.(*ast.CallExpr); ok { found = true } }) @@ -1131,24 +1155,7 @@ func (p *Package) rewriteRef(f *File) { // code for them. functions := make(map[string]bool) - // Assign mangled names. for _, n := range f.Name { - if n.Kind == "not-type" { - if n.Define == "" { - n.Kind = "var" - } else { - n.Kind = "macro" - n.FuncType = &FuncType{ - Result: n.Type, - Go: &ast.FuncType{ - Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}}, - }, - } - } - } - if n.Mangle == "" { - p.mangleName(n) - } if n.Kind == "func" { functions[n.Go] = false } @@ -1162,104 +1169,16 @@ func (p *Package) rewriteRef(f *File) { if r.Name.IsConst() && r.Name.Const == "" { error_(r.Pos(), "unable to find value of constant C.%s", fixGo(r.Name.Go)) } - var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default - switch r.Context { - case ctxCall, ctxCall2: - if r.Name.Kind != "func" { - if r.Name.Kind == "type" { - r.Context = ctxType - if r.Name.Type == nil { - error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) - break - } - expr = r.Name.Type.Go - break - } - error_(r.Pos(), "call of non-function C.%s", fixGo(r.Name.Go)) - break - } - functions[r.Name.Go] = true - if r.Context == ctxCall2 { - if r.Name.Go == "_CMalloc" { - error_(r.Pos(), "no two-result form for C.malloc") - break - } - // Invent new Name for the two-result function. 
- n := f.Name["2"+r.Name.Go] - if n == nil { - n = new(Name) - *n = *r.Name - n.AddError = true - n.Mangle = "_C2func_" + n.Go - f.Name["2"+r.Name.Go] = n - } - expr = ast.NewIdent(n.Mangle) - r.Name = n - break - } - case ctxExpr: - switch r.Name.Kind { - case "func": - if builtinDefs[r.Name.C] != "" { - error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C)) - } - // Function is being used in an expression, to e.g. pass around a C function pointer. - // Create a new Name for this Ref which causes the variable to be declared in Go land. - fpName := "fp_" + r.Name.Go - name := f.Name[fpName] - if name == nil { - name = &Name{ - Go: fpName, - C: r.Name.C, - Kind: "fpvar", - Type: &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*"), Go: ast.NewIdent("unsafe.Pointer")}, - } - p.mangleName(name) - f.Name[fpName] = name - } - r.Name = name - // Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr - // function is defined in out.go and simply returns its argument. See - // issue 7757. - expr = &ast.CallExpr{ - Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"}, - Args: []ast.Expr{ast.NewIdent(name.Mangle)}, - } - case "type": - // Okay - might be new(T) - if r.Name.Type == nil { - error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) - break - } - expr = r.Name.Type.Go - case "var": - expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr} - case "macro": - expr = &ast.CallExpr{Fun: expr} - } - case ctxSelector: - if r.Name.Kind == "var" { - expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr} - } else { - error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go)) - } - case ctxType: - if r.Name.Kind != "type" { - error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go)) - } else if r.Name.Type == nil { - // Use of C.enum_x, C.struct_x or C.union_x without C definition. - // GCC won't raise an error when using pointers to such unknown types. - error_(r.Pos(), "type C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) - } else { - expr = r.Name.Type.Go - } - default: - if r.Name.Kind == "func" { - error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go)) + if r.Name.Kind == "func" { + switch r.Context { + case ctxCall, ctxCall2: + functions[r.Name.Go] = true } } + expr := p.rewriteName(f, r) + if *godefs { // Substitute definition for mangled type name. if id, ok := expr.(*ast.Ident); ok { @@ -1276,8 +1195,7 @@ func (p *Package) rewriteRef(f *File) { // in case expression being replaced is first on line. // See golang.org/issue/6563. pos := (*r.Expr).Pos() - switch x := expr.(type) { - case *ast.Ident: + if x, ok := expr.(*ast.Ident); ok { expr = &ast.Ident{NamePos: pos, Name: x.Name} } @@ -1303,6 +1221,107 @@ func (p *Package) rewriteRef(f *File) { } } +// rewriteName returns the expression used to rewrite a reference. +func (p *Package) rewriteName(f *File, r *Ref) ast.Expr { + var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default + switch r.Context { + case ctxCall, ctxCall2: + if r.Name.Kind != "func" { + if r.Name.Kind == "type" { + r.Context = ctxType + if r.Name.Type == nil { + error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) + break + } + expr = r.Name.Type.Go + break + } + error_(r.Pos(), "call of non-function C.%s", fixGo(r.Name.Go)) + break + } + if r.Context == ctxCall2 { + if r.Name.Go == "_CMalloc" { + error_(r.Pos(), "no two-result form for C.malloc") + break + } + // Invent new Name for the two-result function. 
+ n := f.Name["2"+r.Name.Go] + if n == nil { + n = new(Name) + *n = *r.Name + n.AddError = true + n.Mangle = "_C2func_" + n.Go + f.Name["2"+r.Name.Go] = n + } + expr = ast.NewIdent(n.Mangle) + r.Name = n + break + } + case ctxExpr: + switch r.Name.Kind { + case "func": + if builtinDefs[r.Name.C] != "" { + error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C)) + } + + // Function is being used in an expression, to e.g. pass around a C function pointer. + // Create a new Name for this Ref which causes the variable to be declared in Go land. + fpName := "fp_" + r.Name.Go + name := f.Name[fpName] + if name == nil { + name = &Name{ + Go: fpName, + C: r.Name.C, + Kind: "fpvar", + Type: &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*"), Go: ast.NewIdent("unsafe.Pointer")}, + } + p.mangleName(name) + f.Name[fpName] = name + } + r.Name = name + // Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr + // function is defined in out.go and simply returns its argument. See + // issue 7757. + expr = &ast.CallExpr{ + Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"}, + Args: []ast.Expr{ast.NewIdent(name.Mangle)}, + } + case "type": + // Okay - might be new(T) + if r.Name.Type == nil { + error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) + break + } + expr = r.Name.Type.Go + case "var": + expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr} + case "macro": + expr = &ast.CallExpr{Fun: expr} + } + case ctxSelector: + if r.Name.Kind == "var" { + expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr} + } else { + error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go)) + } + case ctxType: + if r.Name.Kind != "type" { + error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go)) + } else if r.Name.Type == nil { + // Use of C.enum_x, C.struct_x or C.union_x without C definition. + // GCC won't raise an error when using pointers to such unknown types. + error_(r.Pos(), "type C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) + } else { + expr = r.Name.Type.Go + } + default: + if r.Name.Kind == "func" { + error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go)) + } + } + return expr +} + // gccBaseCmd returns the start of the compiler command line. // It uses $CC if set, or else $GCC, or else the compiler recorded // during the initial build as defaultCC. @@ -1377,6 +1396,9 @@ func (p *Package) gccCmd() []string { c = append(c, p.GccOptions...) c = append(c, p.gccMachine()...) 
+ if goos == "aix" { + c = append(c, "-maix64") + } c = append(c, "-") //read input from standard input return c } @@ -1663,7 +1685,77 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6 return d, ints, floats, strs } - fatalf("cannot parse gcc output %s as ELF, Mach-O, PE object", gccTmp()) + if f, err := xcoff.Open(gccTmp()); err == nil { + defer f.Close() + d, err := f.DWARF() + if err != nil { + fatalf("cannot load DWARF output from %s: %v", gccTmp(), err) + } + bo := binary.BigEndian + for _, s := range f.Symbols { + switch { + case isDebugInts(s.Name): + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + ints = make([]int64, len(data)/8) + for i := range ints { + ints[i] = int64(bo.Uint64(data[i*8:])) + } + } + } + } + case isDebugFloats(s.Name): + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + floats = make([]float64, len(data)/8) + for i := range floats { + floats[i] = math.Float64frombits(bo.Uint64(data[i*8:])) + } + } + } + } + default: + if n := indexOfDebugStr(s.Name); n != -1 { + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + strdata[n] = string(data) + } + } + } + break + } + if n := indexOfDebugStrlen(s.Name); n != -1 { + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + strlen := bo.Uint64(data[:8]) + if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt? + fatalf("string literal too big") + } + strlens[n] = int(strlen) + } + } + } + break + } + } + } + + buildStrings() + return d, ints, floats, strs + } + fatalf("cannot parse gcc output %s as ELF, Mach-O, PE, XCOFF object", gccTmp()) panic("not reached") } diff --git a/src/cmd/cgo/godefs.go b/src/cmd/cgo/godefs.go index 6720945cdd..9c763a22fb 100644 --- a/src/cmd/cgo/godefs.go +++ b/src/cmd/cgo/godefs.go @@ -126,3 +126,9 @@ func gofmt(n interface{}) string { } return gofmtBuf.String() } + +// gofmtLine returns the gofmt-formatted string for an AST node, +// ensuring that it is on a single line. 
+func gofmtLine(n interface{}) string { + return strings.Replace(gofmt(n), "\n", ";", -1) +} diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 6217bb17a3..8a26d5c063 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -6,6 +6,7 @@ package main import ( "bytes" + "cmd/internal/xcoff" "debug/elf" "debug/macho" "debug/pe" @@ -312,7 +313,25 @@ func dynimport(obj string) { return } - fatalf("cannot parse %s as ELF, Mach-O or PE", obj) + if f, err := xcoff.Open(obj); err == nil { + sym, err := f.ImportedSymbols() + if err != nil { + fatalf("cannot load imported symbols from XCOFF file %s: %v", obj, err) + } + for _, s := range sym { + fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, s.Name, s.Library) + } + lib, err := f.ImportedLibraries() + if err != nil { + fatalf("cannot load imported libraries from XCOFF file %s: %v", obj, err) + } + for _, l := range lib { + fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l) + } + return + } + + fatalf("cannot parse %s as ELF, Mach-O, PE or XCOFF", obj) } // Construct a gcc struct matching the gc argument frame. diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go index 0dfaacb584..bce03fc40f 100644 --- a/src/cmd/compile/doc.go +++ b/src/cmd/compile/doc.go @@ -92,8 +92,6 @@ Flags: Compile with race detector enabled. -trimpath prefix Remove prefix from recorded source file paths. - -u - Disallow importing packages not marked as safe; implies -nolocalimports. There are also a number of debugging flags; run the command with no arguments for a usage message. @@ -125,7 +123,7 @@ directive can skip over a directive like any other comment. // For a //line comment, this is the first character of the next line, and // for a /*line comment this is the character position immediately following the closing */. // If no filename is given, the recorded filename is empty if there is also no column number; -// otherwise is is the most recently recorded filename (actual filename or filename specified +// otherwise it is the most recently recorded filename (actual filename or filename specified // by previous line directive). // If a line directive doesn't specify a column number, the column is "unknown" until // the next directive and the compiler does not report column numbers for that range. @@ -146,7 +144,7 @@ directive can skip over a directive like any other comment. // will report positions in the original input to the generator. /* The line directive is an historical special case; all other directives are of the form -//go:name and must start at the begnning of a line, indicating that the directive is defined +//go:name and must start at the beginning of a line, indicating that the directive is defined by the Go toolchain. 
//go:noescape diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go index 65f88dfff9..6dfdea1a34 100644 --- a/src/cmd/compile/fmt_test.go +++ b/src/cmd/compile/fmt_test.go @@ -583,7 +583,6 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "", "*cmd/compile/internal/types.Field %p": "", "*cmd/compile/internal/types.Field %v": "", - "*cmd/compile/internal/types.Sym %+v": "", "*cmd/compile/internal/types.Sym %0S": "", "*cmd/compile/internal/types.Sym %S": "", "*cmd/compile/internal/types.Sym %p": "", diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index b4c4b1f4cd..749dbf1d5d 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -229,24 +229,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // Result[0] (the quotient) is in AX. // Result[1] (the remainder) is in DX. r := v.Args[1].Reg() + var j1 *obj.Prog // CPU faults upon signed overflow, which occurs when the most // negative int is divided by -1. Handle divide by -1 as a special case. - var c *obj.Prog - switch v.Op { - case ssa.OpAMD64DIVQ: - c = s.Prog(x86.ACMPQ) - case ssa.OpAMD64DIVL: - c = s.Prog(x86.ACMPL) - case ssa.OpAMD64DIVW: - c = s.Prog(x86.ACMPW) + if ssa.NeedsFixUp(v) { + var c *obj.Prog + switch v.Op { + case ssa.OpAMD64DIVQ: + c = s.Prog(x86.ACMPQ) + case ssa.OpAMD64DIVL: + c = s.Prog(x86.ACMPL) + case ssa.OpAMD64DIVW: + c = s.Prog(x86.ACMPW) + } + c.From.Type = obj.TYPE_REG + c.From.Reg = r + c.To.Type = obj.TYPE_CONST + c.To.Offset = -1 + j1 = s.Prog(x86.AJEQ) + j1.To.Type = obj.TYPE_BRANCH } - c.From.Type = obj.TYPE_REG - c.From.Reg = r - c.To.Type = obj.TYPE_CONST - c.To.Offset = -1 - j1 := s.Prog(x86.AJEQ) - j1.To.Type = obj.TYPE_BRANCH // Sign extend dividend. switch v.Op { @@ -263,36 +266,38 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = r - // Skip over -1 fixup code. - j2 := s.Prog(obj.AJMP) - j2.To.Type = obj.TYPE_BRANCH + if j1 != nil { + // Skip over -1 fixup code. + j2 := s.Prog(obj.AJMP) + j2.To.Type = obj.TYPE_BRANCH - // Issue -1 fixup code. - // n / -1 = -n - var n1 *obj.Prog - switch v.Op { - case ssa.OpAMD64DIVQ: - n1 = s.Prog(x86.ANEGQ) - case ssa.OpAMD64DIVL: - n1 = s.Prog(x86.ANEGL) - case ssa.OpAMD64DIVW: - n1 = s.Prog(x86.ANEGW) + // Issue -1 fixup code. + // n / -1 = -n + var n1 *obj.Prog + switch v.Op { + case ssa.OpAMD64DIVQ: + n1 = s.Prog(x86.ANEGQ) + case ssa.OpAMD64DIVL: + n1 = s.Prog(x86.ANEGL) + case ssa.OpAMD64DIVW: + n1 = s.Prog(x86.ANEGW) + } + n1.To.Type = obj.TYPE_REG + n1.To.Reg = x86.REG_AX + + // n % -1 == 0 + n2 := s.Prog(x86.AXORL) + n2.From.Type = obj.TYPE_REG + n2.From.Reg = x86.REG_DX + n2.To.Type = obj.TYPE_REG + n2.To.Reg = x86.REG_DX + + // TODO(khr): issue only the -1 fixup code we need. + // For instance, if only the quotient is used, no point in zeroing the remainder. + + j1.To.Val = n1 + j2.To.Val = s.Pc() } - n1.To.Type = obj.TYPE_REG - n1.To.Reg = x86.REG_AX - - // n % -1 == 0 - n2 := s.Prog(x86.AXORL) - n2.From.Type = obj.TYPE_REG - n2.From.Reg = x86.REG_DX - n2.To.Type = obj.TYPE_REG - n2.To.Reg = x86.REG_DX - - // TODO(khr): issue only the -1 fixup code we need. - // For instance, if only the quotient is used, no point in zeroing the remainder. 
- - j1.To.Val = n1 - j2.To.Val = s.Pc() case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU: // the frontend rewrites constant division by 8/16/32 bit integers into @@ -315,6 +320,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { m.To.Reg = x86.REG_DX } + case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU: + // Arg[0] is already in AX as it's the only register we allow + // results lo in AX + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + case ssa.OpAMD64MULQU2: // Arg[0] is already in AX as it's the only register we allow // results hi in DX, lo in AX @@ -653,43 +665,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) - p.From.Scale = 8 - p.From.Index = v.Args[1].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) - p.From.Scale = 4 - p.From.Index = v.Args[1].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.OpAMD64MOVWloadidx2: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) - p.From.Scale = 2 - p.From.Index = v.Args[1].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1: + case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1, + ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2: r := v.Args[0].Reg() i := v.Args[1].Reg() - if i == x86.REG_SP { - r, i = i, r - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM + switch v.Op { + case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1: + if i == x86.REG_SP { + r, i = i, r + } + p.From.Scale = 1 + case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8: + p.From.Scale = 8 + case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4: + p.From.Scale = 4 + case ssa.OpAMD64MOVWloadidx2: + p.From.Scale = 2 + } p.From.Reg = r - p.From.Scale = 1 p.From.Index = i gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG @@ -704,45 +699,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) - case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[2].Reg() - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - p.To.Scale = 8 - p.To.Index = v.Args[1].Reg() - gc.AddAux(&p.To, v) - case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[2].Reg() - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - p.To.Scale = 4 - p.To.Index = v.Args[1].Reg() - gc.AddAux(&p.To, v) - case ssa.OpAMD64MOVWstoreidx2: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg 
= v.Args[2].Reg() - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - p.To.Scale = 2 - p.To.Index = v.Args[1].Reg() - gc.AddAux(&p.To, v) - case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1: + case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1, + ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2: r := v.Args[0].Reg() i := v.Args[1].Reg() - if i == x86.REG_SP { - r, i = i, r - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_MEM + switch v.Op { + case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1: + if i == x86.REG_SP { + r, i = i, r + } + p.To.Scale = 1 + case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8: + p.To.Scale = 8 + case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4: + p.To.Scale = 4 + case ssa.OpAMD64MOVWstoreidx2: + p.To.Scale = 2 + } p.To.Reg = r - p.To.Scale = 1 p.To.Index = i gc.AddAux(&p.To, v) case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify: @@ -816,14 +794,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // Break false dependency on destination register. opregreg(s, x86.AXORPS, r, r) opregreg(s, v.Op.Asm(), r, v.Args[0].Reg()) - case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i: - p := s.Prog(x86.AMOVQ) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i: - p := s.Prog(x86.AMOVL) + case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i: + var p *obj.Prog + switch v.Op { + case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i: + p = s.Prog(x86.AMOVQ) + case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i: + p = s.Prog(x86.AMOVL) + } p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG @@ -968,24 +946,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ: + case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg0() - case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.OpAMD64SQRTSD: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() + switch v.Op { + case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ: + p.To.Reg = v.Reg0() + case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD: + p.To.Reg = v.Reg() + } case ssa.OpAMD64ROUNDSD: p := s.Prog(v.Op.Asm()) val := v.AuxInt @@ -1020,7 +991,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF, ssa.OpAMD64SETB, ssa.OpAMD64SETBE, ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN, - ssa.OpAMD64SETA, ssa.OpAMD64SETAE: + ssa.OpAMD64SETA, ssa.OpAMD64SETAE, + ssa.OpAMD64SETO: p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -1163,6 +1135,8 @@ var 
blockJump = [...]struct { ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT}, ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT}, ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE}, + ssa.BlockAMD64OS: {x86.AJOS, x86.AJOC}, + ssa.BlockAMD64OC: {x86.AJOC, x86.AJOS}, ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC}, ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS}, ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS}, @@ -1224,6 +1198,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { case ssa.BlockAMD64EQ, ssa.BlockAMD64NE, ssa.BlockAMD64LT, ssa.BlockAMD64GE, ssa.BlockAMD64LE, ssa.BlockAMD64GT, + ssa.BlockAMD64OS, ssa.BlockAMD64OC, ssa.BlockAMD64ULT, ssa.BlockAMD64UGT, ssa.BlockAMD64ULE, ssa.BlockAMD64UGE: jmp := blockJump[b.Kind] diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index b7d88531fd..b112ff6797 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -102,7 +102,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) { case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR, - TBOOL, TPTR32, TPTR64, + TBOOL, TPTR, TCHAN, TUNSAFEPTR: return AMEM, nil @@ -300,18 +300,8 @@ func genhash(sym *types.Sym, t *types.Type) { testdclstack() } - // Disable safemode while compiling this code: the code we - // generate internally can refer to unsafe.Pointer. - // In this case it can happen if we need to generate an == - // for a struct containing a reflect.Value, which itself has - // an unexported field of type unsafe.Pointer. - old_safemode := safemode - safemode = false - fn.Func.SetNilCheckDisabled(true) funccompile(fn) - - safemode = old_safemode } func hashfor(t *types.Type) *Node { @@ -484,22 +474,12 @@ func geneq(sym *types.Sym, t *types.Type) { testdclstack() } - // Disable safemode while compiling this code: the code we - // generate internally can refer to unsafe.Pointer. - // In this case it can happen if we need to generate an == - // for a struct containing a reflect.Value, which itself has - // an unexported field of type unsafe.Pointer. - old_safemode := safemode - safemode = false - // Disable checknils while compiling this code. // We are comparing a struct or an array, // neither of which can be nil, and our comparisons // are shallow. fn.Func.SetNilCheckDisabled(true) funccompile(fn) - - safemode = old_safemode } // eqfield returns the node diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 9e752fc628..fb761d2339 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -250,12 +250,8 @@ func dowidth(t *types.Type) { w = 16 t.Align = uint8(Widthreg) - case TPTR32: - w = 4 - checkwidth(t.Elem()) - - case TPTR64: - w = 8 + case TPTR: + w = int64(Widthptr) checkwidth(t.Elem()) case TUNSAFEPTR: diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 3ef1e6af4d..7c09ab5a34 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -43,7 +43,7 @@ func (p *exporter) markType(t *types.Type) { // the user already needs some way to construct values of // those types. switch t.Etype { - case TPTR32, TPTR64, TARRAY, TSLICE, TCHAN: + case TPTR, TARRAY, TSLICE, TCHAN: // TODO(mdempsky): Skip marking element type for // send-only channels? 
p.markType(t.Elem()) diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index ec8f1093b6..8051c7d0df 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -55,15 +55,15 @@ var runtimeDecls = [...]struct { {"convT2E16", funcTag, 52}, {"convT2E32", funcTag, 52}, {"convT2E64", funcTag, 52}, - {"convT2Estring", funcTag, 53}, - {"convT2Eslice", funcTag, 53}, + {"convT2Estring", funcTag, 52}, + {"convT2Eslice", funcTag, 52}, {"convT2Enoptr", funcTag, 53}, {"convT2I", funcTag, 53}, {"convT2I16", funcTag, 52}, {"convT2I32", funcTag, 52}, {"convT2I64", funcTag, 52}, - {"convT2Istring", funcTag, 53}, - {"convT2Islice", funcTag, 53}, + {"convT2Istring", funcTag, 52}, + {"convT2Islice", funcTag, 52}, {"convT2Inoptr", funcTag, 53}, {"assertE2I", funcTag, 52}, {"assertE2I2", funcTag, 54}, diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go index 140b7f3b2d..028936b875 100644 --- a/src/cmd/compile/internal/gc/builtin/runtime.go +++ b/src/cmd/compile/internal/gc/builtin/runtime.go @@ -68,16 +68,16 @@ func convT2E(typ *byte, elem *any) (ret any) func convT2E16(typ *byte, val any) (ret any) func convT2E32(typ *byte, val any) (ret any) func convT2E64(typ *byte, val any) (ret any) -func convT2Estring(typ *byte, elem *any) (ret any) -func convT2Eslice(typ *byte, elem *any) (ret any) +func convT2Estring(typ *byte, val any) (ret any) // val must be a string +func convT2Eslice(typ *byte, val any) (ret any) // val must be a slice func convT2Enoptr(typ *byte, elem *any) (ret any) func convT2I(tab *byte, elem *any) (ret any) func convT2I16(tab *byte, val any) (ret any) func convT2I32(tab *byte, val any) (ret any) func convT2I64(tab *byte, val any) (ret any) -func convT2Istring(tab *byte, elem *any) (ret any) -func convT2Islice(tab *byte, elem *any) (ret any) +func convT2Istring(tab *byte, val any) (ret any) // val must be a string +func convT2Islice(tab *byte, val any) (ret any) // val must be a slice func convT2Inoptr(tab *byte, elem *any) (ret any) // interface type assertions x.(T) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 834cdc41eb..0736c5be4f 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -337,18 +337,10 @@ func closuredebugruntimecheck(clo *Node) { } } -func walkclosure(clo *Node, init *Nodes) *Node { - xfunc := clo.Func.Closure - - // If no closure vars, don't bother wrapping. - if hasemptycvars(clo) { - if Debug_closure > 0 { - Warnl(clo.Pos, "closure converted to global") - } - return xfunc.Func.Nname - } - closuredebugruntimecheck(clo) - +// closureType returns the struct type used to hold all the information +// needed in the closure for clo (clo must be a OCLOSURE node). +// The address of a variable of the returned type can be cast to a func. +func closureType(clo *Node) *types.Type { // Create closure in the form of a composite literal. // supposing the closure captures an int i and a string s // and has one float64 argument and no results, @@ -362,11 +354,10 @@ func walkclosure(clo *Node, init *Nodes) *Node { // The information appears in the binary in the form of type descriptors; // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. 
- fields := []*Node{ namedfield(".F", types.Types[TUINTPTR]), } - for _, v := range xfunc.Func.Cvars.Slice() { + for _, v := range clo.Func.Closure.Func.Cvars.Slice() { typ := v.Type if !v.Name.Byval() { typ = types.NewPtr(typ) @@ -375,6 +366,22 @@ func walkclosure(clo *Node, init *Nodes) *Node { } typ := tostruct(fields) typ.SetNoalg(true) + return typ +} + +func walkclosure(clo *Node, init *Nodes) *Node { + xfunc := clo.Func.Closure + + // If no closure vars, don't bother wrapping. + if hasemptycvars(clo) { + if Debug_closure > 0 { + Warnl(clo.Pos, "closure converted to global") + } + return xfunc.Func.Nname + } + closuredebugruntimecheck(clo) + + typ := closureType(clo) clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil)) clos.Esc = clo.Esc @@ -389,10 +396,10 @@ func walkclosure(clo *Node, init *Nodes) *Node { clos.Left.Esc = clo.Esc // non-escaping temp to use, if any. - // orderexpr did not compute the type; fill it in now. if x := prealloc[clo]; x != nil { - x.Type = clos.Left.Left.Type - x.Orig.Type = x.Type + if !types.Identical(typ, x.Type) { + panic("closure type does not match order's assigned type") + } clos.Left.Right = x delete(prealloc, clo) } @@ -479,6 +486,18 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node { return xfunc } +// partialCallType returns the struct type used to hold all the information +// needed in the closure for n (n must be a OCALLPART node). +// The address of a variable of the returned type can be cast to a func. +func partialCallType(n *Node) *types.Type { + t := tostruct([]*Node{ + namedfield("F", types.Types[TUINTPTR]), + namedfield("R", n.Left.Type), + }) + t.SetNoalg(true) + return t +} + func walkpartialcall(n *Node, init *Nodes) *Node { // Create closure in the form of a composite literal. // For x.M with receiver (x) type T, the generated code looks like: @@ -495,30 +514,25 @@ func walkpartialcall(n *Node, init *Nodes) *Node { checknil(n.Left, init) } - typ := tostruct([]*Node{ - namedfield("F", types.Types[TUINTPTR]), - namedfield("R", n.Left.Type), - }) - typ.SetNoalg(true) + typ := partialCallType(n) clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil)) clos.Esc = n.Esc clos.Right.SetImplicit(true) - clos.List.Set1(nod(OCFUNC, n.Func.Nname, nil)) - clos.List.Append(n.Left) + clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left) // Force type conversion from *struct to the func type. clos = convnop(clos, n.Type) - // typecheck will insert a PTRLIT node under CONVNOP, - // tag it with escape analysis result. + // The typecheck inside convnop will insert a PTRLIT node under CONVNOP. + // Tag it with escape analysis result. clos.Left.Esc = n.Esc // non-escaping temp to use, if any. - // orderexpr did not compute the type; fill it in now. 
if x := prealloc[n]; x != nil { - x.Type = clos.Left.Left.Type - x.Orig.Type = x.Type + if !types.Identical(typ, x.Type) { + panic("partial call type does not match order's assigned type") + } clos.Left.Right = x delete(prealloc, n) } diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 02d51678be..a77759832a 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -40,7 +40,7 @@ func (v Val) Ctype() Ctype { switch x := v.U.(type) { default: Fatalf("unexpected Ctype for %T", v.U) - panic("not reached") + panic("unreachable") case nil: return 0 case *NilVal: @@ -68,7 +68,7 @@ func eqval(a, b Val) bool { switch x := a.U.(type) { default: Fatalf("unexpected Ctype for %T", a.U) - panic("not reached") + panic("unreachable") case *NilVal: return true case bool: @@ -96,7 +96,7 @@ func (v Val) Interface() interface{} { switch x := v.U.(type) { default: Fatalf("unexpected Interface for %T", v.U) - panic("not reached") + panic("unreachable") case *NilVal: return nil case bool, string: @@ -311,7 +311,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node { } // avoid repeated calculations, errors - if eqtype(n.Type, t) { + if types.Identical(n.Type, t) { return n } @@ -347,7 +347,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node { case TARRAY: goto bad - case TPTR32, TPTR64, TUNSAFEPTR: + case TPTR, TUNSAFEPTR: n.SetVal(Val{new(Mpint)}) case TCHAN, TFUNC, TINTER, TMAP, TSLICE: @@ -424,29 +424,6 @@ bad: return n } -func copyval(v Val) Val { - switch u := v.U.(type) { - case *Mpint: - i := new(Mpint) - i.Set(u) - i.Rune = u.Rune - v.U = i - - case *Mpflt: - f := newMpflt() - f.Set(u) - v.U = f - - case *Mpcplx: - c := new(Mpcplx) - c.Real.Set(&u.Real) - c.Imag.Set(&u.Imag) - v.U = c - } - - return v -} - func tocplx(v Val) Val { switch u := v.U.(type) { case *Mpint: @@ -585,10 +562,6 @@ func tostr(v Val) Val { i = u.Int64() } v.U = string(i) - - case *NilVal: - // Can happen because of string([]byte(nil)). - v.U = "" } return v @@ -609,50 +582,55 @@ func Isconst(n *Node, ct Ctype) bool { return t == ct || (ct == CTINT && t == CTRUNE) } -// if n is constant, rewrite as OLITERAL node. +// evconst rewrites constant expressions into OLITERAL nodes. func evconst(n *Node) { - // pick off just the opcodes that can be - // constant evaluated. - switch n.Op { - default: - return + nl, nr := n.Left, n.Right - case OADD, - OAND, - OANDAND, - OANDNOT, - OARRAYBYTESTR, - OCOM, - ODIV, - OEQ, - OGE, - OGT, - OLE, - OLSH, - OLT, - OMINUS, - OMOD, - OMUL, - ONE, - ONOT, - OOR, - OOROR, - OPLUS, - ORSH, - OSUB, - OXOR: - break + // Pick off just the opcodes that can be constant evaluated. + switch op := n.Op; op { + case OPLUS, OMINUS, OCOM, ONOT: + if nl.Op == OLITERAL { + setconst(n, unaryOp(op, nl.Val(), n.Type)) + } + + case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND: + if nl.Op == OLITERAL && nr.Op == OLITERAL { + setconst(n, binaryOp(nl.Val(), op, nr.Val())) + } + + case OEQ, ONE, OLT, OLE, OGT, OGE: + if nl.Op == OLITERAL && nr.Op == OLITERAL { + if nl.Type.IsInterface() != nr.Type.IsInterface() { + // Mixed interface/non-interface + // constant comparison means comparing + // nil interface with some typed + // constant, which is always unequal. + // E.g., interface{}(nil) == (*int)(nil). 
+ setboolconst(n, op == ONE) + } else { + setboolconst(n, compareOp(nl.Val(), op, nr.Val())) + } + } + + case OLSH, ORSH: + if nl.Op == OLITERAL && nr.Op == OLITERAL { + setconst(n, shiftOp(nl.Val(), op, nr.Val())) + } case OCONV: - if n.Type == nil { - return - } - if !okforconst[n.Type.Etype] && n.Type.Etype != TNIL { - return + if n.Type != nil && okforconst[n.Type.Etype] && nl.Op == OLITERAL { + // TODO(mdempsky): There should be a convval function. + setconst(n, convlit1(nl, n.Type, true, false).Val()) + } + + case OARRAYBYTESTR: + // string([]byte(nil)) or string([]rune(nil)) + if nl.Op == OLITERAL && nl.Val().Ctype() == CTNIL { + setconst(n, Val{U: ""}) } - // merge adjacent constants in the argument list. case OADDSTR: + // Merge adjacent constants in the argument list. s := n.List.Slice() for i1 := 0; i1 < len(s); i1++ { if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) { @@ -678,521 +656,292 @@ func evconst(n *Node) { } else { n.List.Set(s) } + } +} - return +func match(x, y Val) (Val, Val) { + switch { + case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX: + return tocplx(x), tocplx(y) + case x.Ctype() == CTFLT || y.Ctype() == CTFLT: + return toflt(x), toflt(y) } - nl := n.Left - if nl == nil || nl.Type == nil { - return - } - if consttype(nl) == 0 { - return - } - wl := nl.Type.Etype - if isInt[wl] || isFloat[wl] || isComplex[wl] { - wl = TIDEAL - } + // Mixed int/rune are fine. + return x, y +} - // avoid constant conversions in switches below - const ( - CTINT_ = uint32(CTINT) - CTRUNE_ = uint32(CTRUNE) - CTFLT_ = uint32(CTFLT) - CTCPLX_ = uint32(CTCPLX) - CTSTR_ = uint32(CTSTR) - CTBOOL_ = uint32(CTBOOL) - CTNIL_ = uint32(CTNIL) - OCONV_ = uint32(OCONV) << 16 - OARRAYBYTESTR_ = uint32(OARRAYBYTESTR) << 16 - OPLUS_ = uint32(OPLUS) << 16 - OMINUS_ = uint32(OMINUS) << 16 - OCOM_ = uint32(OCOM) << 16 - ONOT_ = uint32(ONOT) << 16 - OLSH_ = uint32(OLSH) << 16 - ORSH_ = uint32(ORSH) << 16 - OADD_ = uint32(OADD) << 16 - OSUB_ = uint32(OSUB) << 16 - OMUL_ = uint32(OMUL) << 16 - ODIV_ = uint32(ODIV) << 16 - OMOD_ = uint32(OMOD) << 16 - OOR_ = uint32(OOR) << 16 - OAND_ = uint32(OAND) << 16 - OANDNOT_ = uint32(OANDNOT) << 16 - OXOR_ = uint32(OXOR) << 16 - OEQ_ = uint32(OEQ) << 16 - ONE_ = uint32(ONE) << 16 - OLT_ = uint32(OLT) << 16 - OLE_ = uint32(OLE) << 16 - OGE_ = uint32(OGE) << 16 - OGT_ = uint32(OGT) << 16 - OOROR_ = uint32(OOROR) << 16 - OANDAND_ = uint32(OANDAND) << 16 - ) +func compareOp(x Val, op Op, y Val) bool { + x, y = match(x, y) - nr := n.Right - var rv Val - var wr types.EType - var ctype uint32 - var v Val - if nr == nil { - // copy numeric value to avoid modifying - // nl, in case someone still refers to it (e.g. iota). - v = copyval(nl.Val()) - - // rune values are int values for the purpose of constant folding. 
- ctype = uint32(v.Ctype()) - if ctype == CTRUNE_ { - ctype = CTINT_ + switch x.Ctype() { + case CTNIL: + _, _ = x.U.(*NilVal), y.U.(*NilVal) // assert dynamic types match + switch op { + case OEQ: + return true + case ONE: + return false } - switch uint32(n.Op)<<16 | ctype { - default: - if !n.Diag() { - yyerror("illegal constant expression %v %v", n.Op, nl.Type) - n.SetDiag(true) - } - return + case CTBOOL: + x, y := x.U.(bool), y.U.(bool) + switch op { + case OEQ: + return x == y + case ONE: + return x != y + } - case OCONV_ | CTNIL_, - OARRAYBYTESTR_ | CTNIL_: - if n.Type.IsString() { - v = tostr(v) - nl.Type = n.Type + case CTINT, CTRUNE: + x, y := x.U.(*Mpint), y.U.(*Mpint) + return cmpZero(x.Cmp(y), op) + + case CTFLT: + x, y := x.U.(*Mpflt), y.U.(*Mpflt) + return cmpZero(x.Cmp(y), op) + + case CTCPLX: + x, y := x.U.(*Mpcplx), y.U.(*Mpcplx) + eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0 + switch op { + case OEQ: + return eq + case ONE: + return !eq + } + + case CTSTR: + x, y := x.U.(string), y.U.(string) + switch op { + case OEQ: + return x == y + case ONE: + return x != y + case OLT: + return x < y + case OLE: + return x <= y + case OGT: + return x > y + case OGE: + return x >= y + } + } + + Fatalf("compareOp: bad comparison: %v %v %v", x, op, y) + panic("unreachable") +} + +func cmpZero(x int, op Op) bool { + switch op { + case OEQ: + return x == 0 + case ONE: + return x != 0 + case OLT: + return x < 0 + case OLE: + return x <= 0 + case OGT: + return x > 0 + case OGE: + return x >= 0 + } + + Fatalf("cmpZero: want comparison operator, got %v", op) + panic("unreachable") +} + +func binaryOp(x Val, op Op, y Val) Val { + x, y = match(x, y) + +Outer: + switch x.Ctype() { + case CTBOOL: + x, y := x.U.(bool), y.U.(bool) + switch op { + case OANDAND: + return Val{U: x && y} + case OOROR: + return Val{U: x || y} + } + + case CTINT, CTRUNE: + x, y := x.U.(*Mpint), y.U.(*Mpint) + + u := new(Mpint) + u.Rune = x.Rune || y.Rune + u.Set(x) + switch op { + case OADD: + u.Add(y) + case OSUB: + u.Sub(y) + case OMUL: + u.Mul(y) + case ODIV: + if y.CmpInt64(0) == 0 { + yyerror("division by zero") + u.SetOverflow() break } - fallthrough - case OCONV_ | CTINT_, - OCONV_ | CTFLT_, - OCONV_ | CTCPLX_, - OCONV_ | CTSTR_, - OCONV_ | CTBOOL_: - nl = convlit1(nl, n.Type, true, false) - v = nl.Val() - - case OPLUS_ | CTINT_: - break - - case OMINUS_ | CTINT_: - v.U.(*Mpint).Neg() - - case OCOM_ | CTINT_: - et := Txxx - if nl.Type != nil { - et = nl.Type.Etype + u.Quo(y) + case OMOD: + if y.CmpInt64(0) == 0 { + yyerror("division by zero") + u.SetOverflow() + break } + u.Rem(y) + case OOR: + u.Or(y) + case OAND: + u.And(y) + case OANDNOT: + u.AndNot(y) + case OXOR: + u.Xor(y) + default: + break Outer + } + return Val{U: u} - // calculate the mask in b - // result will be (a ^ mask) - var b Mpint - switch et { - // signed guys change sign - default: - b.SetInt64(-1) + case CTFLT: + x, y := x.U.(*Mpflt), y.U.(*Mpflt) - // unsigned guys invert their bits - case TUINT8, - TUINT16, - TUINT32, - TUINT64, - TUINT, - TUINTPTR: - b.Set(maxintval[et]) + u := newMpflt() + u.Set(x) + switch op { + case OADD: + u.Add(y) + case OSUB: + u.Sub(y) + case OMUL: + u.Mul(y) + case ODIV: + if y.CmpFloat64(0) == 0 { + yyerror("division by zero") + u.SetFloat64(1) + break } - - v.U.(*Mpint).Xor(&b) - - case OPLUS_ | CTFLT_: - break - - case OMINUS_ | CTFLT_: - v.U.(*Mpflt).Neg() - - case OPLUS_ | CTCPLX_: - break - - case OMINUS_ | CTCPLX_: - v.U.(*Mpcplx).Real.Neg() - v.U.(*Mpcplx).Imag.Neg() - - case ONOT_ | CTBOOL_: 
- if !v.U.(bool) { - goto settrue - } - goto setfalse - } - goto ret - } - if nr.Type == nil { - return - } - if consttype(nr) == 0 { - return - } - wr = nr.Type.Etype - if isInt[wr] || isFloat[wr] || isComplex[wr] { - wr = TIDEAL - } - - // check for compatible general types (numeric, string, etc) - if wl != wr { - if wl == TINTER || wr == TINTER { - if n.Op == ONE { - goto settrue - } - goto setfalse - } - goto illegal - } - - // check for compatible types. - switch n.Op { - // ideal const mixes with anything but otherwise must match. - default: - if nl.Type.Etype != TIDEAL { - nr = defaultlit(nr, nl.Type) - n.Right = nr - } - - if nr.Type.Etype != TIDEAL { - nl = defaultlit(nl, nr.Type) - n.Left = nl - } - - if nl.Type.Etype != nr.Type.Etype { - goto illegal - } - - // right must be unsigned. - // left can be ideal. - case OLSH, ORSH: - nr = defaultlit(nr, types.Types[TUINT]) - - n.Right = nr - if nr.Type != nil && (nr.Type.IsSigned() || !nr.Type.IsInteger()) { - goto illegal - } - if nl.Val().Ctype() != CTRUNE { - nl.SetVal(toint(nl.Val())) - } - nr.SetVal(toint(nr.Val())) - } - - // copy numeric value to avoid modifying - // n->left, in case someone still refers to it (e.g. iota). - v = copyval(nl.Val()) - rv = nr.Val() - - // convert to common ideal - if v.Ctype() == CTCPLX || rv.Ctype() == CTCPLX { - v = tocplx(v) - rv = tocplx(rv) - } - - if v.Ctype() == CTFLT || rv.Ctype() == CTFLT { - v = toflt(v) - rv = toflt(rv) - } - - // Rune and int turns into rune. - if v.Ctype() == CTRUNE && rv.Ctype() == CTINT { - i := new(Mpint) - i.Set(rv.U.(*Mpint)) - i.Rune = true - rv.U = i - } - if v.Ctype() == CTINT && rv.Ctype() == CTRUNE { - if n.Op == OLSH || n.Op == ORSH { - i := new(Mpint) - i.Set(rv.U.(*Mpint)) - rv.U = i - } else { - i := new(Mpint) - i.Set(v.U.(*Mpint)) - i.Rune = true - v.U = i - } - } - - if v.Ctype() != rv.Ctype() { - // Use of undefined name as constant? - if (v.Ctype() == 0 || rv.Ctype() == 0) && nerrors > 0 { - return - } - Fatalf("constant type mismatch %v(%d) %v(%d)", nl.Type, v.Ctype(), nr.Type, rv.Ctype()) - } - - // rune values are int values for the purpose of constant folding. 
- ctype = uint32(v.Ctype()) - if ctype == CTRUNE_ { - ctype = CTINT_ - } - - // run op - switch uint32(n.Op)<<16 | ctype { - default: - goto illegal - - case OADD_ | CTINT_: - v.U.(*Mpint).Add(rv.U.(*Mpint)) - - case OSUB_ | CTINT_: - v.U.(*Mpint).Sub(rv.U.(*Mpint)) - - case OMUL_ | CTINT_: - v.U.(*Mpint).Mul(rv.U.(*Mpint)) - - case ODIV_ | CTINT_: - if rv.U.(*Mpint).CmpInt64(0) == 0 { - yyerror("division by zero") - v.U.(*Mpint).SetOverflow() - break - } - - v.U.(*Mpint).Quo(rv.U.(*Mpint)) - - case OMOD_ | CTINT_: - if rv.U.(*Mpint).CmpInt64(0) == 0 { - yyerror("division by zero") - v.U.(*Mpint).SetOverflow() - break - } - - v.U.(*Mpint).Rem(rv.U.(*Mpint)) - - case OLSH_ | CTINT_: - v.U.(*Mpint).Lsh(rv.U.(*Mpint)) - - case ORSH_ | CTINT_: - v.U.(*Mpint).Rsh(rv.U.(*Mpint)) - - case OOR_ | CTINT_: - v.U.(*Mpint).Or(rv.U.(*Mpint)) - - case OAND_ | CTINT_: - v.U.(*Mpint).And(rv.U.(*Mpint)) - - case OANDNOT_ | CTINT_: - v.U.(*Mpint).AndNot(rv.U.(*Mpint)) - - case OXOR_ | CTINT_: - v.U.(*Mpint).Xor(rv.U.(*Mpint)) - - case OADD_ | CTFLT_: - v.U.(*Mpflt).Add(rv.U.(*Mpflt)) - - case OSUB_ | CTFLT_: - v.U.(*Mpflt).Sub(rv.U.(*Mpflt)) - - case OMUL_ | CTFLT_: - v.U.(*Mpflt).Mul(rv.U.(*Mpflt)) - - case ODIV_ | CTFLT_: - if rv.U.(*Mpflt).CmpFloat64(0) == 0 { - yyerror("division by zero") - v.U.(*Mpflt).SetFloat64(1.0) - break - } - - v.U.(*Mpflt).Quo(rv.U.(*Mpflt)) - - // The default case above would print 'ideal % ideal', - // which is not quite an ideal error. - case OMOD_ | CTFLT_: - if !n.Diag() { + u.Quo(y) + case OMOD: + // TODO(mdempsky): Move to typecheck. yyerror("illegal constant expression: floating-point %% operation") - n.SetDiag(true) + default: + break Outer } + return Val{U: u} - return + case CTCPLX: + x, y := x.U.(*Mpcplx), y.U.(*Mpcplx) - case OADD_ | CTCPLX_: - v.U.(*Mpcplx).Real.Add(&rv.U.(*Mpcplx).Real) - v.U.(*Mpcplx).Imag.Add(&rv.U.(*Mpcplx).Imag) - - case OSUB_ | CTCPLX_: - v.U.(*Mpcplx).Real.Sub(&rv.U.(*Mpcplx).Real) - v.U.(*Mpcplx).Imag.Sub(&rv.U.(*Mpcplx).Imag) - - case OMUL_ | CTCPLX_: - v.U.(*Mpcplx).Mul(rv.U.(*Mpcplx)) - - case ODIV_ | CTCPLX_: - if !v.U.(*Mpcplx).Div(rv.U.(*Mpcplx)) { - yyerror("complex division by zero") - rv.U.(*Mpcplx).Real.SetFloat64(1.0) - rv.U.(*Mpcplx).Imag.SetFloat64(0.0) - break + u := new(Mpcplx) + u.Real.Set(&x.Real) + u.Imag.Set(&x.Imag) + switch op { + case OADD: + u.Real.Add(&y.Real) + u.Imag.Add(&y.Imag) + case OSUB: + u.Real.Sub(&y.Real) + u.Imag.Sub(&y.Imag) + case OMUL: + u.Mul(y) + case ODIV: + if !u.Div(y) { + yyerror("complex division by zero") + u.Real.SetFloat64(1) + u.Imag.SetFloat64(0) + } + default: + break Outer } - - case OEQ_ | CTNIL_: - goto settrue - - case ONE_ | CTNIL_: - goto setfalse - - case OEQ_ | CTINT_: - if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) == 0 { - goto settrue - } - goto setfalse - - case ONE_ | CTINT_: - if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) != 0 { - goto settrue - } - goto setfalse - - case OLT_ | CTINT_: - if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) < 0 { - goto settrue - } - goto setfalse - - case OLE_ | CTINT_: - if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) <= 0 { - goto settrue - } - goto setfalse - - case OGE_ | CTINT_: - if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) >= 0 { - goto settrue - } - goto setfalse - - case OGT_ | CTINT_: - if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) > 0 { - goto settrue - } - goto setfalse - - case OEQ_ | CTFLT_: - if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) == 0 { - goto settrue - } - goto setfalse - - case ONE_ | CTFLT_: - if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) != 0 { - goto settrue - } - goto setfalse - - case OLT_ | CTFLT_: - if 
v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) < 0 { - goto settrue - } - goto setfalse - - case OLE_ | CTFLT_: - if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) <= 0 { - goto settrue - } - goto setfalse - - case OGE_ | CTFLT_: - if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) >= 0 { - goto settrue - } - goto setfalse - - case OGT_ | CTFLT_: - if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) > 0 { - goto settrue - } - goto setfalse - - case OEQ_ | CTCPLX_: - if v.U.(*Mpcplx).Real.Cmp(&rv.U.(*Mpcplx).Real) == 0 && v.U.(*Mpcplx).Imag.Cmp(&rv.U.(*Mpcplx).Imag) == 0 { - goto settrue - } - goto setfalse - - case ONE_ | CTCPLX_: - if v.U.(*Mpcplx).Real.Cmp(&rv.U.(*Mpcplx).Real) != 0 || v.U.(*Mpcplx).Imag.Cmp(&rv.U.(*Mpcplx).Imag) != 0 { - goto settrue - } - goto setfalse - - case OEQ_ | CTSTR_: - if strlit(nl) == strlit(nr) { - goto settrue - } - goto setfalse - - case ONE_ | CTSTR_: - if strlit(nl) != strlit(nr) { - goto settrue - } - goto setfalse - - case OLT_ | CTSTR_: - if strlit(nl) < strlit(nr) { - goto settrue - } - goto setfalse - - case OLE_ | CTSTR_: - if strlit(nl) <= strlit(nr) { - goto settrue - } - goto setfalse - - case OGE_ | CTSTR_: - if strlit(nl) >= strlit(nr) { - goto settrue - } - goto setfalse - - case OGT_ | CTSTR_: - if strlit(nl) > strlit(nr) { - goto settrue - } - goto setfalse - - case OOROR_ | CTBOOL_: - if v.U.(bool) || rv.U.(bool) { - goto settrue - } - goto setfalse - - case OANDAND_ | CTBOOL_: - if v.U.(bool) && rv.U.(bool) { - goto settrue - } - goto setfalse - - case OEQ_ | CTBOOL_: - if v.U.(bool) == rv.U.(bool) { - goto settrue - } - goto setfalse - - case ONE_ | CTBOOL_: - if v.U.(bool) != rv.U.(bool) { - goto settrue - } - goto setfalse + return Val{U: u} } -ret: - setconst(n, v) - return + Fatalf("binaryOp: bad operation: %v %v %v", x, op, y) + panic("unreachable") +} -settrue: - setconst(n, Val{true}) - return +func unaryOp(op Op, x Val, t *types.Type) Val { + switch op { + case OPLUS: + switch x.Ctype() { + case CTINT, CTRUNE, CTFLT, CTCPLX: + return x + } -setfalse: - setconst(n, Val{false}) - return + case OMINUS: + switch x.Ctype() { + case CTINT, CTRUNE: + x := x.U.(*Mpint) + u := new(Mpint) + u.Rune = x.Rune + u.Set(x) + u.Neg() + return Val{U: u} -illegal: - if !n.Diag() { - yyerror("illegal constant expression: %v %v %v", nl.Type, n.Op, nr.Type) - n.SetDiag(true) + case CTFLT: + x := x.U.(*Mpflt) + u := newMpflt() + u.Set(x) + u.Neg() + return Val{U: u} + + case CTCPLX: + x := x.U.(*Mpcplx) + u := new(Mpcplx) + u.Real.Set(&x.Real) + u.Imag.Set(&x.Imag) + u.Real.Neg() + u.Imag.Neg() + return Val{U: u} + } + + case OCOM: + x := x.U.(*Mpint) + + u := new(Mpint) + u.Rune = x.Rune + if t.IsSigned() || t.IsUntyped() { + // Signed values change sign. + u.SetInt64(-1) + } else { + // Unsigned values invert their bits. + u.Set(maxintval[t.Etype]) + } + u.Xor(x) + return Val{U: u} + + case ONOT: + return Val{U: !x.U.(bool)} } + + Fatalf("unaryOp: bad operation: %v %v", op, x) + panic("unreachable") +} + +func shiftOp(x Val, op Op, y Val) Val { + if x.Ctype() != CTRUNE { + x = toint(x) + } + y = toint(y) + + u := new(Mpint) + u.Set(x.U.(*Mpint)) + u.Rune = x.U.(*Mpint).Rune + switch op { + case OLSH: + u.Lsh(y.U.(*Mpint)) + case ORSH: + u.Rsh(y.U.(*Mpint)) + default: + Fatalf("shiftOp: bad operator: %v", op) + panic("unreachable") + } + return Val{U: u} } // setconst rewrites n as an OLITERAL with value v. 
@@ -1223,6 +972,10 @@ func setconst(n *Node, v Val) { } } +func setboolconst(n *Node, v bool) { + setconst(n, Val{U: v}) +} + func setintconst(n *Node, v int64) { u := new(Mpint) u.SetInt64(v) @@ -1305,9 +1058,7 @@ func idealkind(n *Node) Ctype { OLT, ONE, ONOT, - OOROR, - OCMPSTR, - OCMPIFACE: + OOROR: return CTBOOL // shifts (beware!). @@ -1479,11 +1230,10 @@ func smallintconst(n *Node) bool { TUINT16, TINT32, TUINT32, - TBOOL, - TPTR32: + TBOOL: return true - case TIDEAL, TINT64, TUINT64, TPTR64: + case TIDEAL, TINT64, TUINT64, TPTR: v, ok := n.Val().U.(*Mpint) if ok && v.Cmp(minintval[TINT32]) > 0 && v.Cmp(maxintval[TINT32]) < 0 { return true diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 736ea0a018..516c33d0bb 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -929,7 +929,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F } // eqtype only checks that incoming and result parameters match, // so explicitly check that the receiver parameters match too. - if !eqtype(t, f.Type) || !eqtype(t.Recv().Type, f.Type.Recv().Type) { + if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) { yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) } return f diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 145007f5e1..ad43b3caec 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -798,9 +798,8 @@ func (e *EscState) esc(n *Node, parent *Node) { // gathered here. if n.Esc != EscHeap && n.Type != nil && (n.Type.Width > maxStackVarSize || - (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 || + (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize || n.Op == OMAKESLICE && !isSmallMakeSlice(n)) { - // isSmallMakeSlice returns false for non-constant len/cap. // If that's the case, print a more accurate escape reason. var msgVerb, escapeMsg string @@ -873,7 +872,7 @@ opSwitch: // it is also a dereference, because it is implicitly // dereferenced (see #12588) if n.Type.IsArray() && - !(n.Right.Type.IsPtr() && eqtype(n.Right.Type.Elem(), n.Type)) { + !(n.Right.Type.IsPtr() && types.Identical(n.Right.Type.Elem(), n.Type)) { e.escassignWhyWhere(n.List.Second(), n.Right, "range", n) } else { e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n)) @@ -946,7 +945,8 @@ opSwitch: case OCALLMETH, OCALLFUNC, OCALLINTER: e.esccall(n, parent) - // esccall already done on n.Rlist.First(). tie it's Retval to n.List + // esccall already done on n.Rlist.First() + // tie its Retval to n.List case OAS2FUNC: // x,y = f() rs := e.nodeEscState(n.Rlist.First()).Retval.Slice() where := n @@ -1507,7 +1507,7 @@ func (e *EscState) addDereference(n *Node) *Node { e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth ind.Pos = n.Pos t := n.Type - if t.IsKind(types.Tptr) || t.IsSlice() { + if t.IsPtr() || t.IsSlice() { // This should model our own sloppy use of OIND to encode // decreasing levels of indirection; i.e., "indirecting" a slice // yields the type of an element. 
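A note on the esc.go change above: the named maxImplicitStackVarSize constant replaces the bare 1<<16 threshold for implicit allocations such as new(T) and &T{}. A minimal sketch of the effect, assuming the threshold stays well below 1<<20 bytes (function name is illustrative):

	// p never escapes, but its pointee exceeds the implicit
	// stack-variable limit, so the allocation is moved to the heap.
	func readFirst() byte {
		p := new([1 << 20]byte)
		return p[0]
	}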
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 6ee660988a..85916509cb 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -131,7 +131,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
n := importsym(ipkg, s, op)
if n.Op != ONONAME {
- if n.Op == op && (n.Class() != ctxt || !eqtype(n.Type, t)) {
+ if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index 5d2e36ee51..23ed3f7844 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -697,7 +697,7 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
}
switch t.Etype {
- case TPTR32, TPTR64:
+ case TPTR:
switch mode {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
@@ -1146,8 +1146,6 @@ var opprec = []int{
OGE: 4,
OGT: 4,
ONE: 4,
- OCMPSTR: 4,
- OCMPIFACE: 4,
OSEND: 3,
OANDAND: 2,
OOROR: 1,
@@ -1507,11 +1505,6 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
n1.exprfmt(s, nprec, mode)
}
- case OCMPSTR, OCMPIFACE:
- n.Left.exprfmt(s, nprec, mode)
- mode.Fprintf(s, " %#v ", n.SubOp())
- n.Right.exprfmt(s, nprec+1, mode)
-
default:
mode.Fprintf(s, "<node %v>", n.Op)
buf.WriteString("")
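For reference, the compareOp/binaryOp/unaryOp/shiftOp helpers introduced in const.go above implement the ordinary Go constant-arithmetic rules. A small illustration of the folding they perform:

	const (
		c = 'x' + 1       // rune + int stays a rune constant (CTRUNE)
		s = "go" + "pher" // adjacent string constants merge (OADDSTR)
	)

	// const bad = 1 / 0 // rejected at compile time: "division by zero"

	func main() {
		println(c) // 121, i.e. 'y'
		println(s) // gopher
	}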
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 7e450e2e66..8e643e6690 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -529,119 +529,6 @@ func methtype(t *types.Type) *types.Type {
return nil
}
-// eqtype reports whether t1 and t2 are identical, following the spec rules.
-//
-// Any cyclic type must go through a named type, and if one is
-// named, it is only identical to the other if they are the same
-// pointer (t1 == t2), so there's no chance of chasing cycles
-// ad infinitum, so no need for a depth counter.
-func eqtype(t1, t2 *types.Type) bool {
- return eqtype1(t1, t2, true, nil)
-}
-
-// eqtypeIgnoreTags is like eqtype but it ignores struct tags for struct identity.
-func eqtypeIgnoreTags(t1, t2 *types.Type) bool {
- return eqtype1(t1, t2, false, nil)
-}
-
-type typePair struct {
- t1 *types.Type
- t2 *types.Type
-}
-
-func eqtype1(t1, t2 *types.Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
- if t1 == t2 {
- return true
- }
- if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
- return false
- }
- if t1.Sym != nil || t2.Sym != nil {
- // Special case: we keep byte/uint8 and rune/int32
- // separate for error messages. Treat them as equal.
- switch t1.Etype {
- case TUINT8:
- return (t1 == types.Types[TUINT8] || t1 == types.Bytetype) && (t2 == types.Types[TUINT8] || t2 == types.Bytetype)
- case TINT32:
- return (t1 == types.Types[TINT32] || t1 == types.Runetype) && (t2 == types.Types[TINT32] || t2 == types.Runetype)
- default:
- return false
- }
- }
-
- if assumedEqual == nil {
- assumedEqual = make(map[typePair]struct{})
- } else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
- return true
- }
- assumedEqual[typePair{t1, t2}] = struct{}{}
-
- switch t1.Etype {
- case TINTER:
- if t1.NumFields() != t2.NumFields() {
- return false
- }
- for i, f1 := range t1.FieldSlice() {
- f2 := t2.Field(i)
- if f1.Sym != f2.Sym || !eqtype1(f1.Type, f2.Type, cmpTags, assumedEqual) {
- return false
- }
- }
- return true
-
- case TSTRUCT:
- if t1.NumFields() != t2.NumFields() {
- return false
- }
- for i, f1 := range t1.FieldSlice() {
- f2 := t2.Field(i)
- if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !eqtype1(f1.Type, f2.Type, cmpTags, assumedEqual) {
- return false
- }
- if cmpTags && f1.Note != f2.Note {
- return false
- }
- }
- return true
-
- case TFUNC:
- // Check parameters and result parameters for type equality.
- // We intentionally ignore receiver parameters for type
- // equality, because they're never relevant.
- for _, f := range types.ParamsResults {
- // Loop over fields in structs, ignoring argument names.
- fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
- if len(fs1) != len(fs2) {
- return false
- }
- for i, f1 := range fs1 {
- f2 := fs2[i]
- if f1.Isddd() != f2.Isddd() || !eqtype1(f1.Type, f2.Type, cmpTags, assumedEqual) {
- return false
- }
- }
- }
- return true
-
- case TARRAY:
- if t1.NumElem() != t2.NumElem() {
- return false
- }
-
- case TCHAN:
- if t1.ChanDir() != t2.ChanDir() {
- return false
- }
-
- case TMAP:
- if !eqtype1(t1.Key(), t2.Key(), cmpTags, assumedEqual) {
- return false
- }
- }
-
- return eqtype1(t1.Elem(), t2.Elem(), cmpTags, assumedEqual)
-}
-
// Are t1 and t2 equal struct types when field names are ignored?
// For deciding whether the result struct from g can be copied
// directly when compiling f(g()).
@@ -655,7 +542,7 @@ func eqtypenoname(t1 *types.Type, t2 *types.Type) bool {
}
for i, f1 := range t1.FieldSlice() {
f2 := t2.Field(i)
- if !eqtype(f1.Type, f2.Type) {
+ if !types.Identical(f1.Type, f2.Type) {
return false
}
}
@@ -670,13 +557,6 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
*why = ""
}
- // TODO(rsc,lvd): This behaves poorly in the presence of inlining.
- // https://golang.org/issue/2795
- if safemode && !inimport && src != nil && src.Etype == TUNSAFEPTR {
- yyerror("cannot use unsafe.Pointer")
- errorexit()
- }
-
if src == dst {
return OCONVNOP
}
@@ -685,7 +565,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
}
// 1. src type is identical to dst.
- if eqtype(src, dst) {
+ if types.Identical(src, dst) {
return OCONVNOP
}
@@ -696,7 +576,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
// we want to recompute the itab. Recomputing the itab ensures
// that itabs are unique (thus an interface with a compile-time
// type I has an itab with interface type I).
- if eqtype(src.Orig, dst.Orig) {
+ if types.Identical(src.Orig, dst.Orig) {
if src.IsEmptyInterface() {
// Conversion between two empty interfaces
// requires no code.
@@ -764,7 +644,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
// src and dst have identical element types, and
// either src or dst is not a named type.
if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
- if eqtype(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
+ if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
return OCONVNOP
}
}
@@ -772,8 +652,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
// 5. src is the predeclared identifier nil and dst is a nillable type.
if src.Etype == TNIL {
switch dst.Etype {
- case TPTR32,
- TPTR64,
+ case TPTR,
TFUNC,
TMAP,
TCHAN,
@@ -836,14 +715,14 @@ func convertop(src *types.Type, dst *types.Type, why *string) Op {
}
// 2. Ignoring struct tags, src and dst have identical underlying types.
- if eqtypeIgnoreTags(src.Orig, dst.Orig) {
+ if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
return OCONVNOP
}
// 3. src and dst are unnamed pointer types and, ignoring struct tags,
// their base types have identical underlying types.
if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
- if eqtypeIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
+ if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
return OCONVNOP
}
}
@@ -946,7 +825,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
}
}
- if eqtype(n.Type, t) {
+ if types.Identical(n.Type, t) {
return n
}
@@ -1729,11 +1608,10 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
Curfn = fn
typecheckslice(fn.Nbody.Slice(), Etop)
- // TODO(mdempsky): Investigate why this doesn't work with
- // indexed export. For now, we disable even in non-indexed
- // mode to ensure fair benchmark comparisons and to track down
- // unintended compilation differences.
- if false {
+ // Inline calls within (*T).M wrappers. This is safe because we only
+ // generate those wrappers within the same compilation unit as (T).M.
+ // TODO(mdempsky): Investigate why we can't enable this more generally.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
inlcalls(fn)
}
escAnalyze([]*Node{fn}, false)
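The wrappers referred to in the new comment are the pointer-receiver methods the compiler synthesizes for value methods. A rough sketch (the type and method names here are illustrative):

	type T struct{ x int }

	func (t T) M() int { return t.x }

	// genwrapper emits, roughly:
	//
	//	func (t *T) M() int { return (*t).M() }
	//
	// Since the wrapper is generated in the same compilation unit as
	// (T).M, inlining the inner call is safe, which is what the
	// rcvr.IsPtr() condition above enables.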
@@ -1813,7 +1691,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
return false
}
tm := tms[i]
- if !eqtype(tm.Type, im.Type) {
+ if !types.Identical(tm.Type, im.Type) {
*m = im
*samename = tm
*ptr = 0
@@ -1845,7 +1723,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
return false
}
tm := tms[i]
- if tm.Nointerface() || !eqtype(tm.Type, im.Type) {
+ if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
*m = im
*samename = tm
*ptr = 0
@@ -2003,8 +1881,7 @@ func isdirectiface(t *types.Type) bool {
}
switch t.Etype {
- case TPTR32,
- TPTR64,
+ case TPTR,
TCHAN,
TMAP,
TFUNC,
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index b668409a88..965c545660 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -611,7 +611,7 @@ Outer:
continue
}
for _, n := range prev {
- if eqtype(n.Left.Type, c.node.Left.Type) {
+ if types.Identical(n.Left.Type, c.node.Left.Type) {
yyerrorl(c.node.Pos, "duplicate case %v in type switch\n\tprevious case at %v", c.node.Left.Type, n.Line())
// avoid double-reporting errors
continue Outer
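For context, the duplicate-case check above (now using types.Identical) rejects hypothetical input like:

	func classify(i interface{}) {
		switch i.(type) {
		case int:
			// ...
		case int: // error: duplicate case int in type switch
			//        previous case at ...
		}
	}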
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index eb2ab6b916..9ea727fa64 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -45,7 +45,7 @@ type Node struct {
// - ONAME nodes that refer to local variables use it to identify their stack frame position.
// - ODOT, ODOTPTR, and OINDREGSP use it to indicate offset relative to their base address.
// - OSTRUCTKEY uses it to store the named field's offset.
- // - Named OLITERALs use it to to store their ambient iota value.
+ // - Named OLITERALs use it to store their ambient iota value.
// Possibly still more uses. If you find any, document them.
Xoffset int64
@@ -65,7 +65,7 @@ func (n *Node) ResetAux() {
func (n *Node) SubOp() Op {
switch n.Op {
- case OASOP, OCMPIFACE, OCMPSTR, ONAME:
+ case OASOP, ONAME:
default:
Fatalf("unexpected op: %v", n.Op)
}
@@ -74,7 +74,7 @@ func (n *Node) SubOp() Op {
func (n *Node) SetSubOp(op Op) {
switch n.Op {
- case OASOP, OCMPIFACE, OCMPSTR, ONAME:
+ case OASOP, ONAME:
default:
Fatalf("unexpected op: %v", n.Op)
}
@@ -603,26 +603,32 @@ const (
OAS2DOTTYPE // List = Rlist (x, ok = I.(int))
OASOP // Left Etype= Right (x += y)
OCALL // Left(List) (function call, method call or type conversion)
- OCALLFUNC // Left(List) (function call f(args))
- OCALLMETH // Left(List) (direct method call x.Method(args))
- OCALLINTER // Left(List) (interface method call x.Method(args))
- OCALLPART // Left.Right (method expression x.Method, not called)
- OCAP // cap(Left)
- OCLOSE // close(Left)
- OCLOSURE // func Type { Body } (func literal)
- OCMPIFACE // Left Etype Right (interface comparison, x == y or x != y)
- OCMPSTR // Left Etype Right (string comparison, x == y, x < y, etc)
- OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
- OMAPLIT // Type{List} (composite literal, Type is map)
- OSTRUCTLIT // Type{List} (composite literal, Type is struct)
- OARRAYLIT // Type{List} (composite literal, Type is array)
- OSLICELIT // Type{List} (composite literal, Type is slice)
- OPTRLIT // &Left (left is composite literal)
- OCONV // Type(Left) (type conversion)
- OCONVIFACE // Type(Left) (type conversion, to interface)
- OCONVNOP // Type(Left) (type conversion, no effect)
- OCOPY // copy(Left, Right)
- ODCL // var Left (declares Left of type Left.Type)
+
+ // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+ // Prior to walk, they are: Left(List), where List is all regular arguments.
+ // If present, Right is an ODDDARG that holds the
+ // generated slice used in a call to a variadic function.
+ // After walk, List is a series of assignments to temporaries,
+ // and Rlist is an updated set of arguments, including any ODDDARG slice.
+ // TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
+ OCALLFUNC // Left(List/Rlist) (function call f(args))
+ OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
+ OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
+ OCALLPART // Left.Right (method expression x.Method, not called)
+ OCAP // cap(Left)
+ OCLOSE // close(Left)
+ OCLOSURE // func Type { Body } (func literal)
+ OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
+ OMAPLIT // Type{List} (composite literal, Type is map)
+ OSTRUCTLIT // Type{List} (composite literal, Type is struct)
+ OARRAYLIT // Type{List} (composite literal, Type is array)
+ OSLICELIT // Type{List} (composite literal, Type is slice)
+ OPTRLIT // &Left (left is composite literal)
+ OCONV // Type(Left) (type conversion)
+ OCONVIFACE // Type(Left) (type conversion, to interface)
+ OCONVNOP // Type(Left) (type conversion, no effect)
+ OCOPY // copy(Left, Right)
+ ODCL // var Left (declares Left of type Left.Type)
// Used during parsing but don't last.
ODCLFUNC // func f() or func (r) f()
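To make the new OCALLFUNC comment concrete: the ODDDARG it mentions holds the slice materialized for a variadic call. At the source level (sum is an illustrative name):

	func sum(xs ...int) int {
		t := 0
		for _, x := range xs {
			t += x
		}
		return t
	}

	// sum(1, 2, 3) is compiled roughly as sum([]int{1, 2, 3}...); the
	// []int{1, 2, 3} backing slice is the ODDDARG, and after walk it
	// travels in the call's Rlist along with the other arguments.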
diff --git a/src/cmd/compile/internal/gc/testdata/arith_test.go b/src/cmd/compile/internal/gc/testdata/arith_test.go
index d30d660b34..728ca56892 100644
--- a/src/cmd/compile/internal/gc/testdata/arith_test.go
+++ b/src/cmd/compile/internal/gc/testdata/arith_test.go
@@ -7,6 +7,7 @@
package main
import (
+ "runtime"
"testing"
)
@@ -14,6 +15,13 @@ const (
y = 0x0fffFFFF
)
+var (
+ g8 int8
+ g16 int16
+ g32 int32
+ g64 int64
+)
+
//go:noinline
func lshNop1(x uint64) uint64 {
// two outer shifts should be removed
@@ -915,4 +923,32 @@ func TestArithmetic(t *testing.T) {
testLoadSymCombine(t)
testShiftRemoval(t)
testShiftedOps(t)
+ testDivFixUp(t)
+}
+
+// testDivFixUp ensures that signed division fix-ups are being generated.
+func testDivFixUp(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error("testDivFixUp failed")
+ if e, ok := r.(runtime.Error); ok {
+ t.Logf("%v\n", e.Error())
+ }
+ }
+ }()
+ var w int8 = -128
+ var x int16 = -32768
+ var y int32 = -2147483648
+ var z int64 = -9223372036854775808
+
+ for i := -5; i < 0; i++ {
+ g8 = w / int8(i)
+ g16 = x / int16(i)
+ g32 = y / int32(i)
+ g64 = z / int64(i)
+ g8 = w % int8(i)
+ g16 = x % int16(i)
+ g32 = y % int32(i)
+ g64 = z % int64(i)
+ }
}
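The wrap-around that the fix-up preserves is the one the spec mandates: if the dividend is the most negative value of its type and the divisor is -1, the quotient equals the dividend and the remainder is zero. A minimal sketch:

	import "math"

	func divFixUpExample() (q, r int64) {
		x, d := int64(math.MinInt64), int64(-1)
		// Without the fix-up emitted above, the hardware divide would
		// fault on this overflow; with it, q wraps back to math.MinInt64
		// and r is 0, as required for x / -1 and x % -1.
		return x / d, x % d
	}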
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 4831ecca34..0bbd89f05e 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -87,8 +87,7 @@ var _typekind = []string{
TFLOAT64: "float64",
TBOOL: "bool",
TSTRING: "string",
- TPTR32: "pointer",
- TPTR64: "pointer",
+ TPTR: "pointer",
TUNSAFEPTR: "unsafe.Pointer",
TSTRUCT: "struct",
TINTER: "interface",
@@ -297,21 +296,21 @@ func indexlit(n *Node) *Node {
// n.Left = typecheck1(n.Left, top)
func typecheck1(n *Node, top int) *Node {
switch n.Op {
- case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, ORETJMP:
- // n.Sym is a field/method name, not a variable.
- default:
- if n.Sym != nil {
- if n.Op == ONAME && n.SubOp() != 0 && top&Ecall == 0 {
- yyerror("use of builtin %v not in function call", n.Sym)
- n.Type = nil
- return n
- }
+ case OLITERAL, ONAME, ONONAME, OTYPE:
+ if n.Sym == nil {
+ break
+ }
- typecheckdef(n)
- if n.Op == ONONAME {
- n.Type = nil
- return n
- }
+ if n.Op == ONAME && n.SubOp() != 0 && top&Ecall == 0 {
+ yyerror("use of builtin %v not in function call", n.Sym)
+ n.Type = nil
+ return n
+ }
+
+ typecheckdef(n)
+ if n.Op == ONONAME {
+ n.Type = nil
+ return n
}
}
@@ -633,7 +632,7 @@ func typecheck1(n *Node, top int) *Node {
et = TINT
}
aop := OXXX
- if iscmp[n.Op] && t.Etype != TIDEAL && !eqtype(l.Type, r.Type) {
+ if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
// comparison is okay as long as one side is
// assignable to the other. convert so they have
// the same type.
@@ -688,7 +687,7 @@ func typecheck1(n *Node, top int) *Node {
et = t.Etype
}
- if t.Etype != TIDEAL && !eqtype(l.Type, r.Type) {
+ if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
l, r = defaultlit2(l, r, true)
if r.Type.IsInterface() == l.Type.IsInterface() || aop == 0 {
yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
@@ -748,43 +747,22 @@ func typecheck1(n *Node, top int) *Node {
}
}
- if et == TSTRING {
- if iscmp[n.Op] {
- ot := n.Op
- n.Op = OCMPSTR
- n.SetSubOp(ot)
- } else if n.Op == OADD {
- // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
- n.Op = OADDSTR
+ if et == TSTRING && n.Op == OADD {
+ // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ n.Op = OADDSTR
- if l.Op == OADDSTR {
- n.List.Set(l.List.Slice())
- } else {
- n.List.Set1(l)
- }
- if r.Op == OADDSTR {
- n.List.AppendNodes(&r.List)
- } else {
- n.List.Append(r)
- }
- n.Left = nil
- n.Right = nil
+ if l.Op == OADDSTR {
+ n.List.Set(l.List.Slice())
+ } else {
+ n.List.Set1(l)
}
- }
-
- if et == TINTER {
- if l.Op == OLITERAL && l.Val().Ctype() == CTNIL {
- // swap for back end
- n.Left = r
-
- n.Right = l
- } else if r.Op == OLITERAL && r.Val().Ctype() == CTNIL {
- } else // leave alone for back end
- if r.Type.IsInterface() == l.Type.IsInterface() {
- ot := n.Op
- n.Op = OCMPIFACE
- n.SetSubOp(ot)
+ if r.Op == OADDSTR {
+ n.List.AppendNodes(&r.List)
+ } else {
+ n.List.Append(r)
}
+ n.Left = nil
+ n.Right = nil
}
if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
@@ -1255,7 +1233,7 @@ func typecheck1(n *Node, top int) *Node {
// It isn't necessary, so just do a sanity check.
tp := t.Recv().Type
- if l.Left == nil || !eqtype(l.Left.Type, tp) {
+ if l.Left == nil || !types.Identical(l.Left.Type, tp) {
Fatalf("method receiver")
}
@@ -1474,7 +1452,7 @@ func typecheck1(n *Node, top int) *Node {
n.Right = r
}
- if !eqtype(l.Type, r.Type) {
+ if !types.Identical(l.Type, r.Type) {
yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
n.Type = nil
return n
@@ -1679,7 +1657,7 @@ func typecheck1(n *Node, top int) *Node {
// copy([]byte, string)
if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
- if eqtype(n.Left.Type.Elem(), types.Bytetype) {
+ if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
break
}
yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
@@ -1699,7 +1677,7 @@ func typecheck1(n *Node, top int) *Node {
return n
}
- if !eqtype(n.Left.Type.Elem(), n.Right.Type.Elem()) {
+ if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
n.Type = nil
return n
@@ -1741,14 +1719,14 @@ func typecheck1(n *Node, top int) *Node {
}
}
- // do not use stringtoarraylit.
+ // do not convert to []byte literal. See CL 125796.
// generated code and compiler memory footprint is better without it.
case OSTRARRAYBYTE:
break
case OSTRARRAYRUNE:
if n.Left.Op == OLITERAL {
- n = stringtoarraylit(n)
+ n = stringtoruneslit(n)
}
}
@@ -2134,10 +2112,6 @@ func typecheck1(n *Node, top int) *Node {
}
}
- if safemode && !inimport && !compiling_wrappers && t != nil && t.Etype == TUNSAFEPTR {
- yyerror("cannot use unsafe.Pointer")
- }
-
evconst(n)
if n.Op == OTYPE && top&Etype == 0 {
if !n.Type.Broke() {
@@ -2505,17 +2479,17 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
tt := n.Left.Type
dowidth(tt)
rcvr := f2.Type.Recv().Type
- if !eqtype(rcvr, tt) {
- if rcvr.IsPtr() && eqtype(rcvr.Elem(), tt) {
+ if !types.Identical(rcvr, tt) {
+ if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
checklvalue(n.Left, "call pointer method on")
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
- } else if tt.IsPtr() && !rcvr.IsPtr() && eqtype(tt.Elem(), rcvr) {
+ } else if tt.IsPtr() && !rcvr.IsPtr() && types.Identical(tt.Elem(), rcvr) {
n.Left = nod(OIND, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
- } else if tt.IsPtr() && tt.Elem().IsPtr() && eqtype(derefall(tt), derefall(rcvr)) {
+ } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
for tt.IsPtr() {
// Stop one level early for method with pointer receiver.
@@ -2857,7 +2831,7 @@ func keydup(n *Node, hash map[uint32][]*Node) {
if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
a = a.Left
}
- if !eqtype(a.Type, n.Type) {
+ if !types.Identical(a.Type, n.Type) {
continue
}
cmp.Right = a
@@ -2901,7 +2875,7 @@ func pushtype(n *Node, t *types.Type) {
n.Right.SetImplicit(true) // * is okay
} else if Debug['s'] != 0 {
n.Right = typecheck(n.Right, Etype)
- if n.Right.Type != nil && eqtype(n.Right.Type, t) {
+ if n.Right.Type != nil && types.Identical(n.Right.Type, t) {
fmt.Printf("%v: redundant type: %v\n", n.Line(), t)
}
}
@@ -3287,7 +3261,7 @@ func checkassignlist(stmt *Node, l Nodes) {
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
func samesafeexpr(l *Node, r *Node) bool {
- if l.Op != r.Op || !eqtype(l.Type, r.Type) {
+ if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
return false
}
@@ -3535,27 +3509,19 @@ func typecheckfunc(n *Node) {
}
}
-// The result of stringtoarraylit MUST be assigned back to n, e.g.
-// n.Left = stringtoarraylit(n.Left)
-func stringtoarraylit(n *Node) *Node {
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+// n.Left = stringtoruneslit(n.Left)
+func stringtoruneslit(n *Node) *Node {
if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
Fatalf("stringtoarraylit %v", n)
}
- s := n.Left.Val().U.(string)
var l []*Node
- if n.Type.Elem().Etype == TUINT8 {
- // []byte
- for i := 0; i < len(s); i++ {
- l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(s[0]))))
- }
- } else {
- // []rune
- i := 0
- for _, r := range s {
- l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
- i++
- }
+ s := n.Left.Val().U.(string)
+ i := 0
+ for _, r := range s {
+ l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
+ i++
}
nn := nod(OCOMPLIT, nil, typenod(n.Type))
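
The loop above has a source-level equivalent worth spelling out: the keys of the generated composite literal count runes, not bytes (a sketch; the example string is hypothetical):

    package main

    import "fmt"

    func main() {
        s := "héllo" // 6 bytes, 5 runes
        var l []rune
        i := 0
        for _, r := range s { // the same loop shape as stringtoruneslit
            l = append(l, r) // stands in for nod(OKEY, nodintconst(i), nodintconst(r))
            i++
        }
        fmt.Println(i, len(s), string(l) == s) // 5 6 true
    }
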
@@ -3688,9 +3654,6 @@ func typecheckdef(n *Node) {
default:
Fatalf("typecheckdef %v", n.Op)
- case OGOTO, OLABEL, OPACK:
- // nothing to do here
-
case OLITERAL:
if n.Name.Param.Ntype != nil {
n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, Etype)
@@ -3731,7 +3694,7 @@ func typecheckdef(n *Node) {
goto ret
}
- if !e.Type.IsUntyped() && !eqtype(t, e.Type) {
+ if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
goto ret
}
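
The eqtype calls replaced throughout this file computed Go's type identity, which types.Identical now provides directly. At the language level, identity is what separates a defined type from its underlying type (an illustrative example, independent of the compiler's internals):

    package main

    import "fmt"

    type Celsius float64

    func main() {
        var f float64 = 21.5
        // var c Celsius = f // rejected: Celsius and float64 are not identical types
        c := Celsius(f) // an explicit conversion is required
        fmt.Printf("%T %T\n", c, f) // main.Celsius float64
    }
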
diff --git a/src/cmd/compile/internal/gc/types.go b/src/cmd/compile/internal/gc/types.go
index aa0f066a46..ce82c3a52e 100644
--- a/src/cmd/compile/internal/gc/types.go
+++ b/src/cmd/compile/internal/gc/types.go
@@ -32,9 +32,7 @@ const (
TBOOL = types.TBOOL
- TPTR32 = types.TPTR32
- TPTR64 = types.TPTR64
-
+ TPTR = types.TPTR
TFUNC = types.TFUNC
TSLICE = types.TSLICE
TARRAY = types.TARRAY
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go
index dbc2d647c9..96980ad500 100644
--- a/src/cmd/compile/internal/gc/universe.go
+++ b/src/cmd/compile/internal/gc/universe.go
@@ -177,11 +177,8 @@ func typeinit() {
simtype[et] = et
}
- types.Types[TPTR32] = types.New(TPTR32)
- dowidth(types.Types[TPTR32])
-
- types.Types[TPTR64] = types.New(TPTR64)
- dowidth(types.Types[TPTR64])
+ types.Types[TPTR] = types.New(TPTR)
+ dowidth(types.Types[TPTR])
t := types.New(TUNSAFEPTR)
types.Types[TUNSAFEPTR] = t
@@ -190,11 +187,6 @@ func typeinit() {
asNode(t.Sym.Def).Name = new(Name)
dowidth(types.Types[TUNSAFEPTR])
- types.Tptr = TPTR32
- if Widthptr == 8 {
- types.Tptr = TPTR64
- }
-
for et := TINT8; et <= TUINT64; et++ {
isInt[et] = true
}
@@ -263,8 +255,7 @@ func typeinit() {
okforlen[TSLICE] = true
okforlen[TSTRING] = true
- okforeq[TPTR32] = true
- okforeq[TPTR64] = true
+ okforeq[TPTR] = true
okforeq[TUNSAFEPTR] = true
okforeq[TINTER] = true
okforeq[TCHAN] = true
@@ -357,10 +348,10 @@ func typeinit() {
types.Types[TINTER] = types.New(TINTER)
// simple aliases
- simtype[TMAP] = types.Tptr
- simtype[TCHAN] = types.Tptr
- simtype[TFUNC] = types.Tptr
- simtype[TUNSAFEPTR] = types.Tptr
+ simtype[TMAP] = TPTR
+ simtype[TCHAN] = TPTR
+ simtype[TFUNC] = TPTR
+ simtype[TUNSAFEPTR] = TPTR
array_array = int(Rnd(0, int64(Widthptr)))
array_nel = int(Rnd(int64(array_array)+int64(Widthptr), int64(Widthptr)))
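
A sketch of the simplification in these two files: with a single TPTR kind, pointer width becomes a target property (Widthptr) rather than part of the type kind, and the reference-like types alias to TPTR unconditionally (toy definitions, not the compiler's types package):

    package main

    import "fmt"

    type EType int

    const (
        TPTR EType = iota // one pointer kind, regardless of target width
        TMAP
        TCHAN
        TFUNC
        TUNSAFEPTR
    )

    func main() {
        // Previously Widthptr selected TPTR64 over TPTR32; now it only
        // determines the width of the single TPTR type.
        simtype := map[EType]EType{TMAP: TPTR, TCHAN: TPTR, TFUNC: TPTR, TUNSAFEPTR: TPTR}
        widthptr := 8
        fmt.Println(simtype[TCHAN] == TPTR, widthptr) // true 8
    }
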
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 1b1d36b61d..9ec6f8286e 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -34,7 +34,7 @@ func walk(fn *Node) {
}
}
- // Propagate the used flag for typeswitch variables up to the NONAME in it's definition.
+ // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
for _, ln := range fn.Func.Dcl {
if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
ln.Name.Defn.Left.Name.SetUsed(true)
@@ -109,32 +109,6 @@ func paramoutheap(fn *Node) bool {
return false
}
-// adds "adjust" to all the argument locations for the call n.
-// n must be a defer or go node that has already been walked.
-func adjustargs(n *Node, adjust int) {
- callfunc := n.Left
- for _, arg := range callfunc.List.Slice() {
- if arg.Op != OAS {
- Fatalf("call arg not assignment")
- }
- lhs := arg.Left
- if lhs.Op == ONAME {
- // This is a temporary introduced by reorder1.
- // The real store to the stack appears later in the arg list.
- continue
- }
-
- if lhs.Op != OINDREGSP {
- Fatalf("call argument store does not use OINDREGSP")
- }
-
- // can't really check this in machine-indep code.
- //if(lhs->val.u.reg != D_SP)
- // Fatalf("call arg assign not indreg(SP)")
- lhs.Xoffset += int64(adjust)
- }
-}
-
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
func walkstmt(n *Node) *Node {
@@ -211,6 +185,7 @@ func walkstmt(n *Node) *Node {
ODCLCONST,
ODCLTYPE,
OCHECKNIL,
+ OVARDEF,
OVARKILL,
OVARLIVE:
break
@@ -263,9 +238,6 @@ func walkstmt(n *Node) *Node {
n.Left = walkexpr(n.Left, &n.Ninit)
}
- // make room for size & fn arguments.
- adjustargs(n, 2*Widthptr)
-
case OFOR, OFORUNTIL:
if n.Left != nil {
walkstmtlist(n.Left.Ninit.Slice())
@@ -333,8 +305,19 @@ func walkstmt(n *Node) *Node {
}
walkexprlist(n.List.Slice(), &n.Ninit)
- ll := ascompatte(nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit)
- n.List.Set(ll)
+ // For each return parameter (lhs), assign the corresponding result (rhs).
+ lhs := Curfn.Type.Results()
+ rhs := n.List.Slice()
+ res := make([]*Node, lhs.NumFields())
+ for i, nl := range lhs.FieldSlice() {
+ nname := asNode(nl.Nname)
+ if nname.isParamHeapCopy() {
+ nname = nname.Name.Param.Stackcopy
+ }
+ a := nod(OAS, nname, rhs[i])
+ res[i] = convas(a, &n.Ninit)
+ }
+ n.List.Set(res)
case ORETJMP:
break
@@ -366,7 +349,7 @@ func isSmallMakeSlice(n *Node) bool {
}
t := n.Type
- return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width)
+ return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < maxImplicitStackVarSize/t.Elem().Width)
}
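
The new limit reads as follows (a hedged sketch; the illustrative value below is an assumption, not the constant's actual definition):

    package main

    import "fmt"

    // fitsOnStack mirrors isSmallMakeSlice: a non-escaping make([]T, n)
    // may be stack-allocated only while n*sizeof(T) stays under the cap.
    func fitsOnStack(elemWidth, n, maxImplicitStackVarSize int64) bool {
        return elemWidth == 0 || n < maxImplicitStackVarSize/elemWidth
    }

    func main() {
        const limit = 64 * 1024 // assumed value for illustration only
        fmt.Println(fitsOnStack(8, 1000, limit))   // true
        fmt.Println(fitsOnStack(8, 100000, limit)) // false
    }
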
// walk the whole tree of the body of an
@@ -416,9 +399,9 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
return "convT2E64", false
case from.IsString():
- return "convT2Estring", true
+ return "convT2Estring", false
case from.IsSlice():
- return "convT2Eslice", true
+ return "convT2Eslice", false
case !types.Haspointers(from):
return "convT2Enoptr", true
}
@@ -432,9 +415,9 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
return "convT2I64", false
case from.IsString():
- return "convT2Istring", true
+ return "convT2Istring", false
case from.IsSlice():
- return "convT2Islice", true
+ return "convT2Islice", false
case !types.Haspointers(from):
return "convT2Inoptr", true
}
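
The needsaddr flips above mean strings and slices now reach these conversion helpers by value, without first being spilled to an addressable temporary. What triggers such a helper is an ordinary conversion to an interface type:

    package main

    import "fmt"

    func describe(v interface{}) { fmt.Printf("%T: %v\n", v, v) }

    func main() {
        s := "hello"
        b := []int{1, 2, 3}
        describe(s) // lowered through a convT2Estring-style helper
        describe(b) // lowered through a convT2Eslice-style helper
    }
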
@@ -513,7 +496,7 @@ opswitch:
OIND, OSPTR, OITAB, OIDATA, OADDR:
n.Left = walkexpr(n.Left, init)
- case OEFACE, OAND, OSUB, OMUL, OLT, OLE, OGE, OGT, OADD, OOR, OXOR:
+ case OEFACE, OAND, OSUB, OMUL, OADD, OOR, OXOR:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@@ -583,19 +566,8 @@ opswitch:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
- case OEQ, ONE:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
-
- // Disable safemode while compiling this code: the code we
- // generate internally can refer to unsafe.Pointer.
- // In this case it can happen if we need to generate an ==
- // for a struct containing a reflect.Value, which itself has
- // an unexported field of type unsafe.Pointer.
- old_safemode := safemode
- safemode = false
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
n = walkcompare(n, init)
- safemode = old_safemode
case OANDAND, OOROR:
n.Left = walkexpr(n.Left, init)
@@ -622,19 +594,12 @@ opswitch:
case OCLOSUREVAR, OCFUNC:
n.SetAddable(true)
- case OCALLINTER:
- usemethod(n)
- t := n.Left.Type
- if n.List.Len() != 0 && n.List.First().Op == OAS {
- break
+ case OCALLINTER, OCALLFUNC, OCALLMETH:
+ if n.Op == OCALLINTER {
+ usemethod(n)
}
- n.Left = walkexpr(n.Left, init)
- walkexprlist(n.List.Slice(), init)
- ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
- n.List.Set(reorder1(ll))
- case OCALLFUNC:
- if n.Left.Op == OCLOSURE {
+ if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE {
// Transform direct call of a closure to call of a normal function.
// transformclosure already did all preparation work.
@@ -655,30 +620,7 @@ opswitch:
}
}
- t := n.Left.Type
- if n.List.Len() != 0 && n.List.First().Op == OAS {
- break
- }
-
- n.Left = walkexpr(n.Left, init)
- walkexprlist(n.List.Slice(), init)
-
- ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
- n.List.Set(reorder1(ll))
-
- case OCALLMETH:
- t := n.Left.Type
- if n.List.Len() != 0 && n.List.First().Op == OAS {
- break
- }
- n.Left = walkexpr(n.Left, init)
- walkexprlist(n.List.Slice(), init)
- ll := ascompatte(n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init)
- lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
- ll = append(ll, lr...)
- n.Left.Left = nil
- updateHasCall(n.Left)
- n.List.Set(reorder1(ll))
+ walkCall(n, init)
case OAS, OASOP:
init.AppendNodes(&n.Ninit)
@@ -1203,7 +1145,7 @@ opswitch:
case ONEW:
if n.Esc == EscNone {
- if n.Type.Elem().Width >= 1<<16 {
+ if n.Type.Elem().Width >= maxImplicitStackVarSize {
Fatalf("large ONEW with EscNone: %v", n)
}
r := temp(n.Type.Elem())
@@ -1217,149 +1159,6 @@ opswitch:
n = callnew(n.Type.Elem())
}
- case OCMPSTR:
- // s + "badgerbadgerbadger" == "badgerbadgerbadger"
- if (n.SubOp() == OEQ || n.SubOp() == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) {
- r := nod(n.SubOp(), nod(OLEN, n.Left.List.First(), nil), nodintconst(0))
- n = finishcompare(n, r, init)
- break
- }
-
- // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs *Node // const string, non-const string
- switch {
- case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
- // ignore; will be constant evaluated
- case Isconst(n.Left, CTSTR):
- cs = n.Left
- ncs = n.Right
- case Isconst(n.Right, CTSTR):
- cs = n.Right
- ncs = n.Left
- }
- if cs != nil {
- cmp := n.SubOp()
- // Our comparison below assumes that the non-constant string
- // is on the left hand side, so rewrite "" cmp x to x cmp "".
- // See issue 24817.
- if Isconst(n.Left, CTSTR) {
- cmp = brrev(cmp)
- }
-
- // maxRewriteLen was chosen empirically.
- // It is the value that minimizes cmd/go file size
- // across most architectures.
- // See the commit description for CL 26758 for details.
- maxRewriteLen := 6
- // Some architectures can load unaligned byte sequence as 1 word.
- // So we can cover longer strings with the same amount of code.
- canCombineLoads := canMergeLoads()
- combine64bit := false
- if canCombineLoads {
- // Keep this low enough to generate less code than a function call.
- maxRewriteLen = 2 * thearch.LinkArch.RegSize
- combine64bit = thearch.LinkArch.RegSize >= 8
- }
-
- var and Op
- switch cmp {
- case OEQ:
- and = OANDAND
- case ONE:
- and = OOROR
- default:
- // Don't do byte-wise comparisons for <, <=, etc.
- // They're fairly complicated.
- // Length-only checks are ok, though.
- maxRewriteLen = 0
- }
- if s := cs.Val().U.(string); len(s) <= maxRewriteLen {
- if len(s) > 0 {
- ncs = safeexpr(ncs, init)
- }
- r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
- remains := len(s)
- for i := 0; remains > 0; {
- if remains == 1 || !canCombineLoads {
- cb := nodintconst(int64(s[i]))
- ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
- r = nod(and, r, nod(cmp, ncb, cb))
- remains--
- i++
- continue
- }
- var step int
- var convType *types.Type
- switch {
- case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
- step = 8
- case remains >= 4:
- convType = types.Types[TUINT32]
- step = 4
- case remains >= 2:
- convType = types.Types[TUINT16]
- step = 2
- }
- ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
- ncsubstr = conv(ncsubstr, convType)
- csubstr := int64(s[i])
- // Calculate large constant from bytes as sequence of shifts and ors.
- // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
- // ssa will combine this into a single large load.
- for offset := 1; offset < step; offset++ {
- b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
- b = conv(b, convType)
- b = nod(OLSH, b, nodintconst(int64(8*offset)))
- ncsubstr = nod(OOR, ncsubstr, b)
- csubstr |= int64(s[i+offset]) << uint8(8*offset)
- }
- csubstrPart := nodintconst(csubstr)
- // Compare "step" bytes as once
- r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
- remains -= step
- i += step
- }
- n = finishcompare(n, r, init)
- break
- }
- }
-
- var r *Node
- if n.SubOp() == OEQ || n.SubOp() == ONE {
- // prepare for rewrite below
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
-
- lstr := conv(n.Left, types.Types[TSTRING])
- rstr := conv(n.Right, types.Types[TSTRING])
- lptr := nod(OSPTR, lstr, nil)
- rptr := nod(OSPTR, rstr, nil)
- llen := conv(nod(OLEN, lstr, nil), types.Types[TUINTPTR])
- rlen := conv(nod(OLEN, rstr, nil), types.Types[TUINTPTR])
-
- fn := syslook("memequal")
- fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
- r = mkcall1(fn, types.Types[TBOOL], init, lptr, rptr, llen)
-
- // quick check of len before full compare for == or !=.
- // memequal then tests equality up to length len.
- if n.SubOp() == OEQ {
- // len(left) == len(right) && memequal(left, right, len)
- r = nod(OANDAND, nod(OEQ, llen, rlen), r)
- } else {
- // len(left) != len(right) || !memequal(left, right, len)
- r = nod(ONOT, r, nil)
- r = nod(OOROR, nod(ONE, llen, rlen), r)
- }
- } else {
- // sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
- r = nod(n.SubOp(), r, nodintconst(0))
- }
-
- n = finishcompare(n, r, init)
-
case OADDSTR:
n = addstr(n, init)
@@ -1592,8 +1391,38 @@ opswitch:
n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
- // stringtoslicebyte(*32[byte], string) []byte;
case OSTRARRAYBYTE:
+ s := n.Left
+ if Isconst(s, CTSTR) {
+ sc := s.Val().U.(string)
+
+ // Allocate a [n]byte of the right size.
+ t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
+ var a *Node
+ if n.Esc == EscNone && len(sc) <= maxImplicitStackVarSize {
+ a = nod(OADDR, temp(t), nil)
+ } else {
+ a = callnew(t)
+ }
+ p := temp(t.PtrTo()) // *[n]byte
+ init.Append(typecheck(nod(OAS, p, a), Etop))
+
+ // Copy from the static string data to the [n]byte.
+ if len(sc) > 0 {
+ as := nod(OAS,
+ nod(OIND, p, nil),
+ nod(OIND, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil))
+ as = typecheck(as, Etop)
+ as = walkstmt(as)
+ init.Append(as)
+ }
+
+ // Slice the [n]byte to a []byte.
+ n.Op = OSLICEARR
+ n.Left = p
+ n = walkexpr(n, init)
+ break
+ }
a := nodnil()
if n.Esc == EscNone {
@@ -1603,7 +1432,8 @@ opswitch:
a = nod(OADDR, temp(t), nil)
}
- n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
+ // stringtoslicebyte(*32[byte], string) []byte;
+ n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING]))
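
A source-level picture of the constant case added above: []byte("...") becomes allocate, copy, slice, with no runtime stringtoslicebyte call (a sketch of the generated code's shape, not its exact form):

    package main

    import "fmt"

    func main() {
        // roughly what b := []byte("hi") now compiles to:
        var tmp [2]byte        // stack temp when the result does not escape
        p := &tmp              // p := temp(*[n]byte)
        *p = [2]byte{'h', 'i'} // copy from the static string data
        b := p[:]              // slice the [n]byte to a []byte
        fmt.Println(string(b)) // hi
    }
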
case OSTRARRAYBYTETMP:
// []byte(string) conversion that creates a slice
@@ -1628,40 +1458,6 @@ opswitch:
n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
- // ifaceeq(i1 any-1, i2 any-2) (ret bool);
- case OCMPIFACE:
- if !eqtype(n.Left.Type, n.Right.Type) {
- Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type)
- }
- var fn *Node
- if n.Left.Type.IsEmptyInterface() {
- fn = syslook("efaceeq")
- } else {
- fn = syslook("ifaceeq")
- }
-
- n.Right = cheapexpr(n.Right, init)
- n.Left = cheapexpr(n.Left, init)
- lt := nod(OITAB, n.Left, nil)
- rt := nod(OITAB, n.Right, nil)
- ld := nod(OIDATA, n.Left, nil)
- rd := nod(OIDATA, n.Right, nil)
- ld.Type = types.Types[TUNSAFEPTR]
- rd.Type = types.Types[TUNSAFEPTR]
- ld.SetTypecheck(1)
- rd.SetTypecheck(1)
- call := mkcall1(fn, n.Type, init, lt, ld, rd)
-
- // Check itable/type before full compare.
- // Note: short-circuited because order matters.
- var cmp *Node
- if n.SubOp() == OEQ {
- cmp = nod(OANDAND, nod(OEQ, lt, rt), call)
- } else {
- cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil))
- }
- n = finishcompare(n, cmp, init)
-
case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
// n can be directly represented in the read-only data section.
@@ -1837,7 +1633,7 @@ func fncall(l *Node, rt *types.Type) bool {
if l.HasCall() || l.Op == OINDEXMAP {
return true
}
- if eqtype(l.Type, rt) {
+ if types.Identical(l.Type, rt) {
return false
}
// There might be a conversion required, which might involve a runtime call.
@@ -1870,7 +1666,7 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node {
l = tmp
}
- a := nod(OAS, l, nodarg(r, 0))
+ a := nod(OAS, l, nodarg(r))
a = convas(a, &nn)
updateHasCall(a)
if a.HasCall() {
@@ -1883,99 +1679,23 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node {
return append(nn.Slice(), mm.Slice()...)
}
-// nodarg returns a Node for the function argument denoted by t,
-// which is either the entire function argument or result struct (t is a struct *types.Type)
-// or a specific argument (t is a *types.Field within a struct *types.Type).
+// nodarg returns a Node for the function argument f.
+// f is a *types.Field within a struct *types.Type.
//
-// If fp is 0, the node is for use by a caller invoking the given
+// The node is for use by a caller invoking the given
// function, preparing the arguments before the call
// or retrieving the results after the call.
// It will correspond to an outgoing argument
// slot like 8(SP).
-//
-// If fp is 1, the node is for use by the function itself
-// (the callee), to retrieve its arguments or write its results.
-// In this case the node will be an ONAME with an appropriate
-// type and offset.
-func nodarg(t interface{}, fp int) *Node {
- var n *Node
-
- switch t := t.(type) {
- default:
- Fatalf("bad nodarg %T(%v)", t, t)
-
- case *types.Type:
- // Entire argument struct, not just one arg
- if !t.IsFuncArgStruct() {
- Fatalf("nodarg: bad type %v", t)
- }
-
- // Build fake variable name for whole arg struct.
- n = newname(lookup(".args"))
- n.Type = t
- first := t.Field(0)
- if first == nil {
- Fatalf("nodarg: bad struct")
- }
- if first.Offset == BADWIDTH {
- Fatalf("nodarg: offset not computed for %v", t)
- }
- n.Xoffset = first.Offset
-
- case *types.Field:
- if fp == 1 {
- // NOTE(rsc): This should be using t.Nname directly,
- // except in the case where t.Nname.Sym is the blank symbol and
- // so the assignment would be discarded during code generation.
- // In that case we need to make a new node, and there is no harm
- // in optimization passes to doing so. But otherwise we should
- // definitely be using the actual declaration and not a newly built node.
- // The extra Fatalf checks here are verifying that this is the case,
- // without changing the actual logic (at time of writing, it's getting
- // toward time for the Go 1.7 beta).
- // At some quieter time (assuming we've never seen these Fatalfs happen)
- // we could change this code to use "expect" directly.
- expect := asNode(t.Nname)
- if expect.isParamHeapCopy() {
- expect = expect.Name.Param.Stackcopy
- }
-
- for _, n := range Curfn.Func.Dcl {
- if (n.Class() == PPARAM || n.Class() == PPARAMOUT) && !t.Sym.IsBlank() && n.Sym == t.Sym {
- if n != expect {
- Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, asNode(t.Nname), asNode(t.Nname), asNode(t.Nname).Op)
- }
- return n
- }
- }
-
- if !expect.Sym.IsBlank() {
- Fatalf("nodarg: did not find node in dcl list: %v", expect)
- }
- }
-
- // Build fake name for individual variable.
- // This is safe because if there was a real declared name
- // we'd have used it above.
- n = newname(lookup("__"))
- n.Type = t.Type
- if t.Offset == BADWIDTH {
- Fatalf("nodarg: offset not computed for %v", t)
- }
- n.Xoffset = t.Offset
- n.Orig = asNode(t.Nname)
- }
-
- // Rewrite argument named _ to __,
- // or else the assignment to _ will be
- // discarded during code generation.
- if n.isBlank() {
- n.Sym = lookup("__")
- }
-
- if fp != 0 {
- Fatalf("bad fp: %v", fp)
+func nodarg(f *types.Field) *Node {
+ // Build fake name for individual variable.
+ n := newname(lookup("__"))
+ n.Type = f.Type
+ if f.Offset == BADWIDTH {
+ Fatalf("nodarg: offset not computed for %v", f)
}
+ n.Xoffset = f.Offset
+ n.Orig = asNode(f.Nname)
// preparing arguments for call
n.Op = OINDREGSP
@@ -2012,59 +1732,58 @@ func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node
return n
}
-// check assign expression list to
-// a type list. called in
-// return expr-list
-// func(expr-list)
-func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node {
- // f(g()) where g has multiple return values
- if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() {
- // optimization - can do block copy
- if eqtypenoname(rhs[0].Type, lhs) {
- nl := nodarg(lhs, fp)
- nr := convnop(rhs[0], nl.Type)
- n := convas(nod(OAS, nl, nr), init)
- n.SetTypecheck(1)
- return []*Node{n}
- }
-
- // conversions involved.
- // copy into temporaries.
- var tmps []*Node
- for _, nr := range rhs[0].Type.FieldSlice() {
- tmps = append(tmps, temp(nr.Type))
- }
-
- a := nod(OAS2, nil, nil)
- a.List.Set(tmps)
- a.Rlist.Set(rhs)
- a = typecheck(a, Etop)
- a = walkstmt(a)
- init.Append(a)
-
- rhs = tmps
+func walkCall(n *Node, init *Nodes) {
+ if n.Rlist.Len() != 0 {
+ return // already walked
}
+ n.Left = walkexpr(n.Left, init)
+ walkexprlist(n.List.Slice(), init)
- // For each parameter (LHS), assign its corresponding argument (RHS).
+ params := n.Left.Type.Params()
+ args := n.List.Slice()
// If there's a ... parameter (which is only valid as the final
// parameter) and this is not a ... call expression,
// then assign the remaining arguments as a slice.
- var nn []*Node
- for i, nl := range lhs.FieldSlice() {
- var nr *Node
- if nl.Isddd() && !isddd {
- nr = mkdotargslice(nl.Type, rhs[i:], init, call.Right)
- } else {
- nr = rhs[i]
+ if nf := params.NumFields(); nf > 0 {
+ if last := params.Field(nf - 1); last.Isddd() && !n.Isddd() {
+ tail := args[nf-1:]
+ slice := mkdotargslice(last.Type, tail, init, n.Right)
+ // Allow immediate GC.
+ for i := range tail {
+ tail[i] = nil
+ }
+ args = append(args[:nf-1], slice)
}
-
- a := nod(OAS, nodarg(nl, fp), nr)
- a = convas(a, init)
- a.SetTypecheck(1)
- nn = append(nn, a)
}
- return nn
+ // If this is a method call, add the receiver at the beginning of the args.
+ if n.Op == OCALLMETH {
+ withRecv := make([]*Node, len(args)+1)
+ withRecv[0] = n.Left.Left
+ n.Left.Left = nil
+ copy(withRecv[1:], args)
+ args = withRecv
+ }
+
+ // For any argument whose evaluation might require a function call,
+ // store that argument into a temporary variable,
+ // to prevent such calls from clobbering arguments already on the stack.
+ // When instrumenting, all arguments might require function calls.
+ var tempAssigns []*Node
+ for i, arg := range args {
+ updateHasCall(arg)
+ if instrumenting || arg.HasCall() {
+ // make assignment of fncall to tempname
+ tmp := temp(arg.Type)
+ a := nod(OAS, tmp, arg)
+ tempAssigns = append(tempAssigns, a)
+ // replace arg with temp
+ args[i] = tmp
+ }
+ }
+
+ n.List.Set(tempAssigns)
+ n.Rlist.Set(args)
}
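
A minimal sketch of walkCall's staging rule, with strings standing in for compiler Nodes: any argument whose evaluation contains a call is copied to a temporary first, so evaluating a later argument cannot clobber an earlier one already in place.

    package main

    import "fmt"

    type arg struct {
        expr    string
        hasCall bool
    }

    // stageArgs plays the role of the temp-introduction loop in walkCall.
    func stageArgs(args []arg, instrumenting bool) (assigns []string) {
        for i := range args {
            if instrumenting || args[i].hasCall {
                tmp := fmt.Sprintf("tmp%d", i)
                assigns = append(assigns, tmp+" := "+args[i].expr)
                args[i].expr = tmp // replace arg with temp
            }
        }
        return assigns
    }

    func main() {
        args := []arg{{"x", false}, {"g()", true}, {"y+1", false}}
        fmt.Println(stageArgs(args, false)) // [tmp1 := g()]
    }
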
// generate code for print
@@ -2138,7 +1857,7 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printiface")
}
on = substArgTypes(on, n.Type) // any-1
- case TPTR32, TPTR64, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ case TPTR, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
on = syslook("printpointer")
on = substArgTypes(on, n.Type) // any-1
case TSLICE:
@@ -2179,7 +1898,7 @@ func walkprint(nn *Node, init *Nodes) *Node {
r := nod(OCALL, on, nil)
if params := on.Type.Params().FieldSlice(); len(params) > 0 {
t := params[0].Type
- if !eqtype(t, n.Type) {
+ if !types.Identical(t, n.Type) {
n = nod(OCONV, n, nil)
n.Type = t
}
@@ -2258,7 +1977,7 @@ func convas(n *Node, init *Nodes) *Node {
return n
}
- if !eqtype(lt, rt) {
+ if !types.Identical(lt, rt) {
n.Right = assignconv(n.Right, lt, "assignment")
n.Right = walkexpr(n.Right, init)
}
@@ -2267,71 +1986,6 @@ func convas(n *Node, init *Nodes) *Node {
return n
}
-// from ascompat[te]
-// evaluating actual function arguments.
-// f(a,b)
-// if there is exactly one function expr,
-// then it is done first. otherwise must
-// make temp variables
-func reorder1(all []*Node) []*Node {
- // When instrumenting, force all arguments into temporary
- // variables to prevent instrumentation calls from clobbering
- // arguments already on the stack.
-
- funcCalls := 0
- if !instrumenting {
- if len(all) == 1 {
- return all
- }
-
- for _, n := range all {
- updateHasCall(n)
- if n.HasCall() {
- funcCalls++
- }
- }
- if funcCalls == 0 {
- return all
- }
- }
-
- var g []*Node // fncalls assigned to tempnames
- var f *Node // last fncall assigned to stack
- var r []*Node // non fncalls and tempnames assigned to stack
- d := 0
- for _, n := range all {
- if !instrumenting {
- if !n.HasCall() {
- r = append(r, n)
- continue
- }
-
- d++
- if d == funcCalls {
- f = n
- continue
- }
- }
-
- // make assignment of fncall to tempname
- a := temp(n.Right.Type)
-
- a = nod(OAS, a, n.Right)
- g = append(g, a)
-
- // put normal arg assignment on list
- // with fncall replaced by tempname
- n.Right = a.Left
-
- r = append(r, n)
- }
-
- if f != nil {
- g = append(g, f)
- }
- return append(g, r...)
-}
-
// from ascompat[ee]
// a,b = c,d
// simultaneous assignment. there cannot
@@ -2657,14 +2311,24 @@ func paramstoheap(params *types.Type) []*Node {
// The generated code is added to Curfn's Enter list.
func zeroResults() {
for _, f := range Curfn.Type.Results().Fields().Slice() {
- if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil {
+ v := asNode(f.Nname)
+ if v != nil && v.Name.Param.Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
continue
}
+ if v.isParamHeapCopy() {
+ // TODO(josharian/khr): Investigate whether we can switch to "continue" here,
+ // and document more in either case.
+ // In the review of CL 114797, Keith wrote (roughly):
+ // I don't think the zeroing below matters.
+ // The stack return value will never be marked as live anywhere in the function.
+ // It is not written to until deferreturn returns.
+ v = v.Name.Param.Stackcopy
+ }
// Zero the stack location containing f.
- Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, nodarg(f, 1), nil))
+ Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil))
}
}
@@ -2731,7 +2395,7 @@ func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
}
func conv(n *Node, t *types.Type) *Node {
- if eqtype(n.Type, t) {
+ if types.Identical(n.Type, t) {
return n
}
n = nod(OCONV, n, nil)
@@ -2753,7 +2417,7 @@ func convnop(n *Node, t *types.Type) *Node {
// We cannot use conv, because we allow converting bool to uint8 here,
// which is forbidden in user code.
func byteindex(n *Node) *Node {
- if eqtype(n.Type, types.Types[TUINT8]) {
+ if types.Identical(n.Type, types.Types[TUINT8]) {
return n
}
n = nod(OCONV, n, nil)
@@ -2974,6 +2638,7 @@ func appendslice(n *Node, init *Nodes) *Node {
// s = s[:n]
nt := nod(OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
+ nt.SetBounded(true)
nodes.Append(nod(OAS, s, nt))
var ncopy *Node
@@ -3143,6 +2808,7 @@ func extendslice(n *Node, init *Nodes) *Node {
// s = s[:n]
nt := nod(OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
+ nt.SetBounded(true)
nodes = append(nodes, nod(OAS, s, nt))
// lptr := &l1[0]
@@ -3218,6 +2884,8 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
}
walkexprlistsafe(n.List.Slice()[1:], init)
+ nsrc := n.List.First()
+
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
@@ -3226,11 +2894,14 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
// before we begin to modify the slice in a visible way.
ls := n.List.Slice()[1:]
for i, n := range ls {
- ls[i] = cheapexpr(n, init)
+ n = cheapexpr(n, init)
+ if !types.Identical(n.Type, nsrc.Type.Elem()) {
+ n = assignconv(n, nsrc.Type.Elem(), "append")
+ n = walkexpr(n, init)
+ }
+ ls[i] = n
}
- nsrc := n.List.First()
-
argc := n.List.Len() - 1
if argc < 1 {
return nsrc
@@ -3265,6 +2936,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
+ nx.SetBounded(true)
l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
ls = n.List.Slice()[1:]
@@ -3360,7 +3032,7 @@ func eqfor(t *types.Type) (n *Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
- // is handled during type check (OCMPSTR etc).
+ // is handled by walkcompare.
switch a, _ := algtype1(t); a {
case AMEM:
n := syslook("memequal")
@@ -3385,6 +3057,17 @@ func eqfor(t *types.Type) (n *Node, needsize bool) {
// The result of walkcompare MUST be assigned back to n, e.g.
// n.Left = walkcompare(n.Left, init)
func walkcompare(n *Node, init *Nodes) *Node {
+ if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL {
+ return walkcompareInterface(n, init)
+ }
+
+ if n.Left.Type.IsString() && n.Right.Type.IsString() {
+ return walkcompareString(n, init)
+ }
+
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+
// Given interface value l and concrete value r, rewrite
// l == r
// into types-equal && data-equal.
@@ -3597,6 +3280,183 @@ func walkcompare(n *Node, init *Nodes) *Node {
return n
}
+func walkcompareInterface(n *Node, init *Nodes) *Node {
+ // ifaceeq(i1 any-1, i2 any-2) (ret bool);
+ if !types.Identical(n.Left.Type, n.Right.Type) {
+ Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type)
+ }
+ var fn *Node
+ if n.Left.Type.IsEmptyInterface() {
+ fn = syslook("efaceeq")
+ } else {
+ fn = syslook("ifaceeq")
+ }
+
+ n.Right = cheapexpr(n.Right, init)
+ n.Left = cheapexpr(n.Left, init)
+ lt := nod(OITAB, n.Left, nil)
+ rt := nod(OITAB, n.Right, nil)
+ ld := nod(OIDATA, n.Left, nil)
+ rd := nod(OIDATA, n.Right, nil)
+ ld.Type = types.Types[TUNSAFEPTR]
+ rd.Type = types.Types[TUNSAFEPTR]
+ ld.SetTypecheck(1)
+ rd.SetTypecheck(1)
+ call := mkcall1(fn, n.Type, init, lt, ld, rd)
+
+ // Check itable/type before full compare.
+ // Note: short-circuited because order matters.
+ var cmp *Node
+ if n.Op == OEQ {
+ cmp = nod(OANDAND, nod(OEQ, lt, rt), call)
+ } else {
+ cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil))
+ }
+ return finishcompare(n, cmp, init)
+}
+
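Written as source-level Go, the comparison walkcompareInterface builds looks like this (a sketch with stand-in types; dataEq plays the role of the runtime's efaceeq/ifaceeq):

    package main

    import "fmt"

    type iface struct {
        tab  *int // stands in for the itab/type word
        data *int // stands in for the data word
    }

    // dataEq is only consulted after the type words already matched,
    // which is why the && below must short-circuit.
    func dataEq(a, b iface) bool { return *a.data == *b.data }

    func equal(a, b iface) bool {
        return a.tab == b.tab && dataEq(a, b)
    }

    func main() {
        t, x, y := 1, 42, 42
        fmt.Println(equal(iface{&t, &x}, iface{&t, &y})) // true
    }
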
+func walkcompareString(n *Node, init *Nodes) *Node {
+ // s + "badgerbadgerbadger" == "badgerbadgerbadger"
+ if (n.Op == OEQ || n.Op == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) {
+ r := nod(n.Op, nod(OLEN, n.Left.List.First(), nil), nodintconst(0))
+ return finishcompare(n, r, init)
+ }
+
+ // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
+ var cs, ncs *Node // const string, non-const string
+ switch {
+ case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
+ // ignore; will be constant evaluated
+ case Isconst(n.Left, CTSTR):
+ cs = n.Left
+ ncs = n.Right
+ case Isconst(n.Right, CTSTR):
+ cs = n.Right
+ ncs = n.Left
+ }
+ if cs != nil {
+ cmp := n.Op
+ // Our comparison below assumes that the non-constant string
+ // is on the left hand side, so rewrite "" cmp x to x cmp "".
+ // See issue 24817.
+ if Isconst(n.Left, CTSTR) {
+ cmp = brrev(cmp)
+ }
+
+ // maxRewriteLen was chosen empirically.
+ // It is the value that minimizes cmd/go file size
+ // across most architectures.
+ // See the commit description for CL 26758 for details.
+ maxRewriteLen := 6
+ // Some architectures can load unaligned byte sequence as 1 word.
+ // So we can cover longer strings with the same amount of code.
+ canCombineLoads := canMergeLoads()
+ combine64bit := false
+ if canCombineLoads {
+ // Keep this low enough to generate less code than a function call.
+ maxRewriteLen = 2 * thearch.LinkArch.RegSize
+ combine64bit = thearch.LinkArch.RegSize >= 8
+ }
+
+ var and Op
+ switch cmp {
+ case OEQ:
+ and = OANDAND
+ case ONE:
+ and = OOROR
+ default:
+ // Don't do byte-wise comparisons for <, <=, etc.
+ // They're fairly complicated.
+ // Length-only checks are ok, though.
+ maxRewriteLen = 0
+ }
+ if s := cs.Val().U.(string); len(s) <= maxRewriteLen {
+ if len(s) > 0 {
+ ncs = safeexpr(ncs, init)
+ }
+ r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
+ remains := len(s)
+ for i := 0; remains > 0; {
+ if remains == 1 || !canCombineLoads {
+ cb := nodintconst(int64(s[i]))
+ ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
+ r = nod(and, r, nod(cmp, ncb, cb))
+ remains--
+ i++
+ continue
+ }
+ var step int
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[TINT64]
+ step = 8
+ case remains >= 4:
+ convType = types.Types[TUINT32]
+ step = 4
+ case remains >= 2:
+ convType = types.Types[TUINT16]
+ step = 2
+ }
+ ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
+ ncsubstr = conv(ncsubstr, convType)
+ csubstr := int64(s[i])
+ // Calculate large constant from bytes as sequence of shifts and ors.
+ // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will combine this into a single large load.
+ for offset := 1; offset < step; offset++ {
+ b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
+ b = conv(b, convType)
+ b = nod(OLSH, b, nodintconst(int64(8*offset)))
+ ncsubstr = nod(OOR, ncsubstr, b)
+ csubstr |= int64(s[i+offset]) << uint8(8*offset)
+ }
+ csubstrPart := nodintconst(csubstr)
+ // Compare "step" bytes at once
+ r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
+ remains -= step
+ i += step
+ }
+ return finishcompare(n, r, init)
+ }
+ }
+
+ var r *Node
+ if n.Op == OEQ || n.Op == ONE {
+ // prepare for rewrite below
+ n.Left = cheapexpr(n.Left, init)
+ n.Right = cheapexpr(n.Right, init)
+
+ lstr := conv(n.Left, types.Types[TSTRING])
+ rstr := conv(n.Right, types.Types[TSTRING])
+ lptr := nod(OSPTR, lstr, nil)
+ rptr := nod(OSPTR, rstr, nil)
+ llen := conv(nod(OLEN, lstr, nil), types.Types[TUINTPTR])
+ rlen := conv(nod(OLEN, rstr, nil), types.Types[TUINTPTR])
+
+ fn := syslook("memequal")
+ fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
+ r = mkcall1(fn, types.Types[TBOOL], init, lptr, rptr, llen)
+
+ // quick check of len before full compare for == or !=.
+ // memequal then tests equality up to length len.
+ if n.Op == OEQ {
+ // len(left) == len(right) && memequal(left, right, len)
+ r = nod(OANDAND, nod(OEQ, llen, rlen), r)
+ } else {
+ // len(left) != len(right) || !memequal(left, right, len)
+ r = nod(ONOT, r, nil)
+ r = nod(OOROR, nod(ONE, llen, rlen), r)
+ }
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
+ r = nod(n.Op, r, nodintconst(0))
+ }
+
+ return finishcompare(n, r, init)
+}
+
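A worked example of the rewrite for a short constant string, in both its byte-wise and combined-load forms (source-level Go standing in for the generated nodes):

    package main

    import "fmt"

    func main() {
        s := "go!!"
        // Byte-wise form of s == "go!!" (cmp = OEQ, and = OANDAND):
        eq1 := len(s) == 4 && s[0] == 'g' && s[1] == 'o' && s[2] == '!' && s[3] == '!'
        // With canCombineLoads, four bytes collapse into one uint32 compare;
        // SSA later turns the shift/or chain into a single load:
        eq2 := len(s) == 4 &&
            uint32(s[0])|uint32(s[1])<<8|uint32(s[2])<<16|uint32(s[3])<<24 ==
                uint32('g')|uint32('o')<<8|uint32('!')<<16|uint32('!')<<24
        fmt.Println(eq1, eq2) // true true
    }
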
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, r, init)
func finishcompare(n, r *Node, init *Nodes) *Node {
@@ -3931,8 +3791,6 @@ func candiscard(n *Node) bool {
OSTRARRAYBYTE,
OSTRARRAYRUNE,
OCAP,
- OCMPIFACE,
- OCMPSTR,
OCOMPLIT,
OMAPLIT,
OSTRUCTLIT,
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 0a7238850c..a3f8b67177 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -313,9 +313,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
arg0 := v.Args[0].Reg()
out := v.Reg0()
- // SYNC
- psync := s.Prog(ppc64.ASYNC)
- psync.To.Type = obj.TYPE_NONE
+ // SYNC when AuxInt == 1; otherwise, load-acquire
+ if v.AuxInt == 1 {
+ psync := s.Prog(ppc64.ASYNC)
+ psync.To.Type = obj.TYPE_NONE
+ }
// Load
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
@@ -338,7 +340,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64LoweredAtomicStore32,
ssa.OpPPC64LoweredAtomicStore64:
- // SYNC
+ // SYNC or LWSYNC
// MOVD/MOVW arg1,(arg0)
st := ppc64.AMOVD
if v.Op == ssa.OpPPC64LoweredAtomicStore32 {
@@ -346,8 +348,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
arg0 := v.Args[0].Reg()
arg1 := v.Args[1].Reg()
+ // If AuxInt == 0, LWSYNC (Store-Release), else SYNC
// SYNC
- psync := s.Prog(ppc64.ASYNC)
+ syncOp := ppc64.ASYNC
+ if v.AuxInt == 0 {
+ syncOp = ppc64.ALWSYNC
+ }
+ psync := s.Prog(syncOp)
psync.To.Type = obj.TYPE_NONE
// Store
p := s.Prog(st)
@@ -360,12 +367,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpPPC64LoweredAtomicCas32:
// LWSYNC
// loop:
- // LDAR (Rarg0), Rtmp
+ // LDAR (Rarg0), MutexHint, Rtmp
// CMP Rarg1, Rtmp
// BNE fail
// STDCCC Rarg2, (Rarg0)
// BNE loop
- // LWSYNC
+ // LWSYNC // Only for sequential consistency; not required in CasRel.
// MOVD $1, Rout
// BR end
// fail:
@@ -393,6 +400,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
+ // If it is a Compare-and-Swap-Release operation, set the EH field with
+ // the release hint.
+ if v.AuxInt == 0 {
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0})
+ }
// CMP reg1,reg2
p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
@@ -414,8 +426,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.Patch(p4, p)
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
- plwsync2 := s.Prog(ppc64.ALWSYNC)
- plwsync2.To.Type = obj.TYPE_NONE
+ // If the operation is a CAS-Release, then synchronization is not necessary.
+ if v.AuxInt != 0 {
+ plwsync2 := s.Prog(ppc64.ALWSYNC)
+ plwsync2.To.Type = obj.TYPE_NONE
+ }
// return true
p5 := s.Prog(ppc64.AMOVD)
p5.From.Type = obj.TYPE_CONST
@@ -967,7 +982,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64LoweredMove:
// This will be used when moving more
- // than 8 bytes. Moves start with as
+ // than 8 bytes. Moves start with
// as many 8 byte moves as possible, then
// 4, 2, or 1 byte(s) as remaining. This will
// work and be efficient for power8 or later.
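
A minimal sketch of the AuxInt dispatch running through these ppc64 hunks, with strings standing in for the emitted instructions: AuxInt == 0 marks the weaker release (or acquire) form, which drops or downgrades the barrier.

    package main

    import "fmt"

    // atomicStoreSeq mirrors the store lowering above: a store-release
    // (AuxInt == 0) needs only LWSYNC, while a sequentially consistent
    // store (AuxInt == 1) keeps the full SYNC.
    func atomicStoreSeq(auxInt int64) []string {
        sync := "SYNC"
        if auxInt == 0 {
            sync = "LWSYNC"
        }
        return []string{sync, "MOVD arg1, (arg0)"}
    }

    func main() {
        fmt.Println(atomicStoreSeq(0)) // [LWSYNC MOVD arg1, (arg0)]
        fmt.Println(atomicStoreSeq(1)) // [SYNC MOVD arg1, (arg0)]
    }
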
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 8b5d6d94e8..7f933cb66e 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -373,6 +373,7 @@ var passes = [...]pass{
{name: "phiopt", fn: phiopt},
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
+ {name: "fuse plain", fn: fusePlain},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
@@ -380,7 +381,7 @@ var passes = [...]pass{
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
{name: "check bce", fn: checkbce},
{name: "branchelim", fn: branchelim},
- {name: "fuse", fn: fuse},
+ {name: "fuse", fn: fuseAll},
{name: "dse", fn: dse},
{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
{name: "insert resched checks", fn: insertLoopReschedChecks,
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 40008bcf87..e79629695a 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -178,6 +178,7 @@ type GCNode interface {
Typ() *types.Type
String() string
IsSynthetic() bool
+ IsAutoTmp() bool
StorageClass() StorageClass
}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index c1fbdcc517..8df8a94b76 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -153,8 +153,12 @@ var BlockEnd = &Value{
// RegisterSet is a bitmap of registers, indexed by Register.num.
type RegisterSet uint64
+// logf prints debug-specific logging to stdout (always stdout) if the current
+// function is tagged by GOSSAFUNC (for ssa output directed either to stdout or html).
func (s *debugState) logf(msg string, args ...interface{}) {
- s.f.Logf(msg, args...)
+ if s.f.PrintOrHtmlSSA {
+ fmt.Printf(msg, args...)
+ }
}
type debugState struct {
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 5832050a8a..b76410d597 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -86,6 +86,10 @@ func (d *DummyAuto) IsSynthetic() bool {
return false
}
+func (d *DummyAuto) IsAutoTmp() bool {
+ return true
+}
+
func (DummyFrontend) StringData(s string) interface{} {
return nil
}
@@ -163,7 +167,6 @@ func init() {
}
types.Dowidth = func(t *types.Type) {}
- types.Tptr = types.TPTR64
for _, typ := range [...]struct {
width int64
et types.EType
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index eb5775efcb..2ed4086fd1 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -37,9 +37,10 @@ type Func struct {
// Given an environment variable used for debug hash match,
// what file (if any) receives the yes/no logging?
- logfiles map[string]writeSyncer
- HTMLWriter *HTMLWriter // html writer, for debugging
- DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
+ logfiles map[string]writeSyncer
+ HTMLWriter *HTMLWriter // html writer, for debugging
+ DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
+ PrintOrHtmlSSA bool // true if GOSSAFUNC matches; stays true even when fe.Log() (which spews phase results to stdout) is false.
scheduled bool // Values in Blocks are in final order
NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index 4f9a2ad9ca..c451904124 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -8,15 +8,33 @@ import (
"cmd/internal/src"
)
+// fusePlain runs fuse(f, fuseTypePlain).
+func fusePlain(f *Func) { fuse(f, fuseTypePlain) }
+
+// fuseAll runs fuse(f, fuseTypeAll).
+func fuseAll(f *Func) { fuse(f, fuseTypeAll) }
+
+type fuseType uint8
+
+const (
+ fuseTypePlain fuseType = 1 << iota
+ fuseTypeIf
+ fuseTypeAll = fuseTypePlain | fuseTypeIf
+)
+
// fuse simplifies control flow by joining basic blocks.
-func fuse(f *Func) {
+func fuse(f *Func, typ fuseType) {
for changed := true; changed; {
changed = false
// Fuse from end to beginning, to avoid quadratic behavior in fuseBlockPlain. See issue 13554.
for i := len(f.Blocks) - 1; i >= 0; i-- {
b := f.Blocks[i]
- changed = fuseBlockIf(b) || changed
- changed = fuseBlockPlain(b) || changed
+ if typ&fuseTypeIf != 0 {
+ changed = fuseBlockIf(b) || changed
+ }
+ if typ&fuseTypePlain != 0 {
+ changed = fuseBlockPlain(b) || changed
+ }
}
}
}
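
The flag pattern above is a plain bitmask; a self-contained illustration of how the two entry points select which block kinds a pass may fuse:

    package main

    import "fmt"

    type fuseType uint8

    const (
        fuseTypePlain fuseType = 1 << iota
        fuseTypeIf
        fuseTypeAll = fuseTypePlain | fuseTypeIf
    )

    func main() {
        for _, typ := range []fuseType{fuseTypePlain, fuseTypeAll} {
            fmt.Println(typ&fuseTypePlain != 0, typ&fuseTypeIf != 0)
        }
        // Output:
        // true false
        // true true
    }
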
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index bba92f805e..c3e25a80c4 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -26,7 +26,7 @@ func TestFuseEliminatesOneBranch(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
- fuse(fun.f)
+ fuseAll(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -56,7 +56,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
- fuse(fun.f)
+ fuseAll(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -90,7 +90,7 @@ func TestFuseHandlesPhis(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
- fuse(fun.f)
+ fuseAll(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@@ -122,7 +122,7 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
))
CheckFunc(fun.f)
- fuse(fun.f)
+ fuseAll(fun.f)
for k, b := range fun.blocks {
if k[:1] == "z" && b.Kind != BlockInvalid {
@@ -162,7 +162,7 @@ func BenchmarkFuse(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
fun := c.Fun("entry", blocks...)
- fuse(fun.f)
+ fuseAll(fun.f)
}
})
}
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index f6aa37e884..e1680ec37c 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -17,14 +17,17 @@
(Mul(32|64)F x y) -> (MULS(S|D) x y)
(Mul32uhilo x y) -> (MULLQU x y)
+(Select0 (Mul32uover x y)) -> (Select0 (MULLU x y))
+(Select1 (Mul32uover x y)) -> (SETO (Select1 (MULLU x y)))
+
(Avg32u x y) -> (AVGLU x y)
(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)
-(Div32 x y) -> (DIVL x y)
+(Div32 [a] x y) -> (DIVL [a] x y)
(Div32u x y) -> (DIVLU x y)
-(Div16 x y) -> (DIVW x y)
+(Div16 [a] x y) -> (DIVW [a] x y)
(Div16u x y) -> (DIVWU x y)
(Div8 x y) -> (DIVW (SignExt8to16 x) (SignExt8to16 y))
(Div8u x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
@@ -32,9 +35,9 @@
(Hmul32 x y) -> (HMULL x y)
(Hmul32u x y) -> (HMULLU x y)
-(Mod32 x y) -> (MODL x y)
+(Mod32 [a] x y) -> (MODL [a] x y)
(Mod32u x y) -> (MODLU x y)
-(Mod16 x y) -> (MODW x y)
+(Mod16 [a] x y) -> (MODW [a] x y)
(Mod16u x y) -> (MODWU x y)
(Mod8 x y) -> (MODW (SignExt8to16 x) (SignExt8to16 y))
(Mod8u x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
@@ -369,6 +372,7 @@
(If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no)
+(If (SETO cmp) yes no) -> (OS cmp yes no)
// Special case for floating point - LF/LEF not generated
(If (SETGF cmp) yes no) -> (UGT cmp yes no)
@@ -398,6 +402,7 @@
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) -> (OS cmp yes no)
// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no)
@@ -614,38 +619,39 @@
(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
// Don't extend before storing
-(MOVWstore [off] {sym} ptr (MOVWLSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBLSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVWstore [off] {sym} ptr (MOVWLZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBLZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) -> (MOVBstore [off] {sym} ptr x mem)
// fold constants into memory operations
// Note that this is not always a good idea because if not all the uses of
// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
-(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
-(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
-(MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
-(MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
-
-(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
-(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
-(MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
-(MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {sym} val base idx mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) && is32Bit(off1+off2*4) ->
+ ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2*4] {sym} val base idx mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2] {sym} base idx val mem)
+((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) && is32Bit(off1+off2*4) ->
+ ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2*4] {sym} base idx val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) && ValAndOff(valoff1).canAdd(off2) ->
+ ((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) && ValAndOff(valoff1).canAdd(off2*4) ->
+ ((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
// Fold constants into stores.
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
@@ -656,12 +662,8 @@
(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores.
-(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {s} ptr mem)
// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
@@ -671,97 +673,43 @@
// a separate instruction gives us that register. Having the LEAL be
// a separate instruction also allows it to be CSEd (which is good because
// it compiles to a thunk call).
-(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-
-(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ (MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
// generating indexed loads and stores
-(MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOV(B|W|L|SS|SD)load [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOV(B|W|L|SS|SD)loadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOV(L|SS)load [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOV(L|SS)loadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOV(B|W|L|SS|SD)store [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOV(B|W|L|SS|SD)storeidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOV(L|SS)store [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOV(L|SS)storeidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
@@ -771,35 +719,27 @@
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
+ && ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ ((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
-(MOVBload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
-(MOVWload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
-(MOVLload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
-(MOVSSload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
-(MOVSDload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
-(MOVBstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
-(MOVWstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
-(MOVLstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
-(MOVSSstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
-(MOVSDstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
+(MOV(B|W|L|SS|SD)load [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOV(B|W|L|SS|SD)loadidx1 [off] {sym} ptr idx mem)
+(MOV(B|W|L|SS|SD)store [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOV(B|W|L|SS|SD)storeidx1 [off] {sym} ptr idx val mem)
-(MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOV(B|W|L)storeconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
+ (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
-(MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
-(MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
+(MOV(B|W|L)storeconst [x] {sym} (ADDL ptr idx) mem) -> (MOV(B|W|L)storeconstidx1 [x] {sym} ptr idx mem)
// combine SHLL into indexed loads and stores
(MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
@@ -810,76 +750,64 @@
(MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
// combine ADDL into indexed loads and stores
-(MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOV(B|W|L|SS|SD)loadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOV(B|W|L|SS|SD)loadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOV(L|SS)loadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOV(L|SS)loadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOV(B|W|L|SS|SD)storeidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOV(B|W|L|SS|SD)storeidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOV(L|SS)storeidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOV(L|SS)storeidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [int64(int32(c+2*d))] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOV(B|W|L|SS|SD)loadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOV(B|W|L|SS|SD)loadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [int64(int32(c+2*d))] {sym} ptr idx mem)
+(MOV(L|SS)loadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOV(L|SS)loadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
-(MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOV(B|W|L|SS|SD)storeidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOV(B|W|L|SS|SD)storeidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [int64(int32(c+2*d))] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOV(L|SS)storeidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOV(L|SS)storeidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
// Merge load/store to op
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
+((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) && canMergeLoad(v, l, x) && clobber(l) ->
+ ((ADD|AND|OR|XOR|SUB|MUL)Lloadidx4 x [off] {sym} ptr idx mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|AND|OR|XOR)Lloadidx4 x [off] {sym} ptr idx mem) mem) && y.Uses==1 && clobber(y) ->
+ ((ADD|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx x mem)
+(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|SUB|AND|OR|XOR)L l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
+ ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx x mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
&& y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) ->
+ ((ADD|AND|OR|XOR)Lconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+((ADD|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) && validValAndOff(c,off) ->
+ ((ADD|AND|OR|XOR)Lconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+(SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) && validValAndOff(-c,off) ->
+ (ADDLconstmodifyidx4 [makeValAndOff(-c,off)] {sym} ptr idx mem)
-(MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
- (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
- (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOV(B|W|L)storeconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
+ (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
- (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
- (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
- (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOV(B|W|L)storeconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
+ (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
- (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
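
The Lmodifyidx4 and Lconstmodifyidx4 rules threaded through the hunks above let a read-modify-write on a scaled indexed address become a single instruction. A minimal Go sketch of the source pattern they target (function and variable names are illustrative, not from the patch):

package p

// bump adds delta to one slot of a word-sized slice. With the new
// rules, counts[i] += delta can lower on 386 to a single
// ADDLmodifyidx4 (ADDL reg, (base)(idx*4)) instead of a separate
// load, add, and store; counts[i] += 1 can further become
// ADDLconstmodifyidx4 with the constant folded into the instruction.
func bump(counts []int32, i int, delta int32) {
	counts[i] += delta
}
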
@@ -1199,11 +1127,21 @@
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
&& x.Uses == 1
@@ -1223,10 +1161,14 @@
-> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst [1] i) mem)
// Combine stores into larger (unaligned) stores.
-(MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHR(W|L)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstore [i] {s} p w mem)
(MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
&& x.Uses == 1
&& clobber(x)
@@ -1240,10 +1182,14 @@
&& clobber(x)
-> (MOVLstore [i-2] {s} p w0 mem)
-(MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+(MOVBstoreidx1 [i] {s} p idx (SHR(L|W)const [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVWstoreidx1 [i-1] {s} p idx w mem)
+(MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHR(L|W)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstoreidx1 [i] {s} p idx w mem)
(MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
&& x.Uses == 1
&& clobber(x)
@@ -1283,3 +1229,7 @@
(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int32(c)),off) -> (CMPLconstload {sym} [makeValAndOff(int64(int32(c)),off)] ptr mem)
(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.BigEndian))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(int32(read32(sym, off, config.BigEndian)))])
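
The three symIsRO rules that close this file fold loads from read-only symbols into constants: read8/read16/read32 fetch the bytes straight out of the symbol's data at compile time. A hedged sketch of code that can trigger them (whether the frontend has already folded the index is version-dependent, so treat this as illustrative):

package p

const digits = "0123456789abcdef"

// hexNibble indexes a constant string. The string's bytes live in a
// read-only symbol, so a MOVBload from it at a constant offset can be
// rewritten to a MOVLconst holding the byte value itself.
func hexNibble() byte {
	return digits[11] // can fold to the constant 'b'
}
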
diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go
index 1786eea7cf..fa3e7cd375 100644
--- a/src/cmd/compile/internal/ssa/gen/386Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/386Ops.go
@@ -126,9 +126,10 @@ func init() {
readflags = regInfo{inputs: nil, outputs: gponly}
flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
- gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
- gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
- gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
@@ -206,6 +207,8 @@ func init() {
{name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+
{name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
@@ -213,15 +216,16 @@ func init() {
{name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits
- {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", clobberFlags: true}, // arg0 / arg1
- {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", clobberFlags: true}, // arg0 / arg1
- {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
- {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
+ // For DIVL, DIVW, MODL and MODW, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
- {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", clobberFlags: true}, // arg0 % arg1
- {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", clobberFlags: true}, // arg0 % arg1
- {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
- {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
+ {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
{name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
{name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
@@ -281,6 +285,7 @@ func init() {
{name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
{name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+ // binary-op with a memory source operand
{name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
@@ -288,6 +293,14 @@ func init() {
{name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ // binary-op with an indexed memory source operand
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "MULLloadidx4", argLength: 4, reg: gp21loadidx, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+
// unary ops
{name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
@@ -316,6 +329,7 @@ func init() {
{name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
{name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
// Need different opcodes for floating point conditions because
// any comparison involving a NaN is always FALSE and thus
// the patterns for inverting conditions cannot be used.
@@ -367,12 +381,25 @@ func init() {
{name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
{name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+ // direct binary-op on indexed memory (read-modify-write)
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) += arg2, arg3=mem
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) &= arg2, arg3=mem
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) |= arg2, arg3=mem
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) ^= arg2, arg3=mem
+
// direct binary-op on memory with a constant (read-modify-write)
{name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ // direct binary-op on indexed memory with a constant (read-modify-write)
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+
// indexed loads/stores
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
@@ -530,6 +557,8 @@ func init() {
{name: "LE"},
{name: "GT"},
{name: "GE"},
+ {name: "OS"},
+ {name: "OC"},
{name: "ULT"},
{name: "ULE"},
{name: "UGT"},
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index f9ac5e4dce..86f7d921e4 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -16,10 +16,14 @@
(Mul(64|32|16|8) x y) -> (MUL(Q|L|L|L) x y)
(Mul(32|64)F x y) -> (MULS(S|D) x y)
+(Select0 (Mul64uover x y)) -> (Select0 (MULQU x y))
+(Select0 (Mul32uover x y)) -> (Select0 (MULLU x y))
+(Select1 (Mul(64|32)uover x y)) -> (SETO (Select1 (MUL(Q|L)U x y)))
+
(Hmul(64|32) x y) -> (HMUL(Q|L) x y)
(Hmul(64|32)u x y) -> (HMUL(Q|L)U x y)
-(Div(64|32|16) x y) -> (Select0 (DIV(Q|L|W) x y))
+(Div(64|32|16) [a] x y) -> (Select0 (DIV(Q|L|W) [a] x y))
(Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) -> (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
@@ -30,7 +34,7 @@
(Avg64u x y) -> (AVGQU x y)
-(Mod(64|32|16) x y) -> (Select1 (DIV(Q|L|W) x y))
+(Mod(64|32|16) [a] x y) -> (Select1 (DIV(Q|L|W) [a] x y))
(Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) -> (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
@@ -480,6 +484,7 @@
(If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no)
+(If (SETO cmp) yes no) -> (OS cmp yes no)
// Special case for floating point - LF/LEF not generated
(If (SETGF cmp) yes no) -> (UGT cmp yes no)
@@ -542,6 +547,7 @@
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) -> (OS cmp yes no)
// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
-(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
+(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) ->
(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
+(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) ->
(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
+(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) ->
(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores.
@@ -2119,16 +2125,31 @@
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
&& config.useSSE
&& x.Uses == 1
@@ -2170,6 +2191,10 @@
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstore [i] {s} p w mem)
(MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem))
&& x.Uses == 1
&& clobber(x)
@@ -2487,3 +2512,8 @@
&& validValAndOff(0,off)
&& clobber(l) ->
@l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.BigEndian))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read32(sym, off, config.BigEndian))])
+(MOVQload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read64(sym, off, config.BigEndian))])
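
Among the AMD64 changes above is a new MOVBstore pairing rule that matches the byte stores in the opposite order from the existing one: first the low byte at i, then w>>8 at i+1. A sketch of the store pattern both orders come from (illustrative):

package p

// putU16 writes v in little-endian byte order, the layout
// binary.LittleEndian.PutUint16 uses. The rules merge the two
// one-byte stores into a single MOVWstore regardless of which byte
// the source code happens to store first.
func putU16(b []byte, v uint16) {
	b[0] = byte(v)
	b[1] = byte(v >> 8)
}
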
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 017c07071d..29f208f0d0 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -210,6 +210,9 @@ func init() {
{name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true}, // arg0 * auxint
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+ {name: "MULQU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt64,Flags)", asm: "MULQ", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). Returns uint64(x), and flags set to overflow if uint64(x) != x.
+
{name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULQU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
@@ -217,9 +220,11 @@ func init() {
{name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
- {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
- {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
- {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ // For DIVQ, DIVL and DIVW, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+
{name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
@@ -468,6 +473,7 @@ func init() {
{name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
{name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
// Variants that store result to memory
{name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
{name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
@@ -754,6 +760,8 @@ func init() {
{name: "LE"},
{name: "GT"},
{name: "GE"},
+ {name: "OS"},
+ {name: "OC"},
{name: "ULT"},
{name: "ULE"},
{name: "UGT"},
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index fdf4d1e900..8b0e82f154 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -1544,3 +1544,7 @@
(GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftLLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftRLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftRAreg x y z) yes no)
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read8(sym, off))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read16(sym, off, config.BigEndian))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(int32(read32(sym, off, config.BigEndian)))])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 659081ec8b..1efce66016 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -2957,3 +2957,8 @@
(FSUBD a (FNMULD x y)) -> (FMADDD a x y)
(FSUBS (FNMULS x y) a) -> (FNMADDS a x y)
(FSUBD (FNMULD x y) a) -> (FNMADDD a x y)
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read8(sym, off))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read16(sym, off, config.BigEndian))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read32(sym, off, config.BigEndian))])
+(MOVDload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read64(sym, off, config.BigEndian))])
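
The read8/read16/read32/read64 helpers used by these rules take config.BigEndian because the symbol's bytes are laid out in target byte order, and the folded constant must match what the load would have produced. A sketch of the idea (the real helper reads the symbol's data; this standalone version and its signature are assumptions for illustration):

package p

import "encoding/binary"

// read16 assembles a 16-bit constant from raw symbol bytes in the
// target's byte order, mirroring what a MOVHUload would return.
func read16(data []byte, off int64, bigEndian bool) uint16 {
	if bigEndian {
		return binary.BigEndian.Uint16(data[off:])
	}
	return binary.LittleEndian.Uint16(data[off:])
}
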
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index 7d79c9ad50..0eaa88596b 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -297,6 +297,8 @@
(Ctz64 x) -> (POPCNTD (ANDN (ADDconst [-1] x) x))
(Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN (ADDconst [-1] x) x)))
+(Ctz16 x) -> (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x)))
+(Ctz8 x) -> (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x)))
(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD x))
(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW x))
@@ -304,7 +306,7 @@
(PopCount64 x) -> (POPCNTD x)
(PopCount32 x) -> (POPCNTW (MOVWZreg x))
(PopCount16 x) -> (POPCNTW (MOVHZreg x))
-(PopCount8 x) -> (POPCNTB (MOVBreg x))
+(PopCount8 x) -> (POPCNTB (MOVBZreg x))
(And(64|32|16|8) x y) -> (AND x y)
(Or(64|32|16|8) x y) -> (OR x y)
@@ -894,16 +896,19 @@
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// atomic intrinsics
-(AtomicLoad(32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) ptr mem)
+(AtomicLoad(32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) [1] ptr mem)
+(AtomicLoadAcq32 ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)
-(AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) ptr val mem)
+(AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) [1] ptr val mem)
+(AtomicStoreRel32 ptr val mem) -> (LoweredAtomicStore32 [0] ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)
(AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem)
(AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem)
-(AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) ptr old new_ mem)
+(AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
+(AtomicCompareAndSwapRel32 ptr old new_ mem) -> (LoweredAtomicCas32 [0] ptr old new_ mem)
(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem)
@@ -956,7 +961,7 @@
(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
-// Lose widening ops fed to to stores
+// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
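
The new Ctz16/Ctz8 lowerings reuse the Ctz32 identity: (x-1) &^ x sets exactly the trailing-zero bit positions of x (and is all ones for x == 0), so a population count of it is the trailing-zero count; the MOVHZreg/MOVBZreg zero-extensions keep the borrow from x-1 out of the upper bits. The PopCount8 operand is likewise switched from a sign- to a zero-extension, since a sign-extended negative byte would add phantom one bits. A Go restatement of the identity (illustrative):

package p

import "math/bits"

// ctz16 restates the lowering: (x-1) &^ x has ones exactly in the
// trailing-zero positions of x, and is all ones when x == 0, so
// counting its one bits within 16 bits gives TrailingZeros16.
func ctz16(x uint16) int {
	return bits.OnesCount16((x - 1) &^ x)
}
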
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index c82f7312fe..ef0db69fb7 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -470,12 +470,12 @@ func init() {
faultOnNilArg1: true,
},
- {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", faultOnNilArg0: true, hasSideEffects: true},
- {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
- {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", clobberFlags: true, faultOnNilArg0: true},
- {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", clobberFlags: true, faultOnNilArg0: true},
- {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
// atomic add32, 64
// SYNC
@@ -516,8 +516,8 @@ func init() {
// BNE -4(PC)
// CBNZ Rtmp, -4(PC)
// CSET EQ, Rout
- {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
- {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic 8 and/or.
// *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index d490e32f3d..14a67846dc 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -816,7 +816,7 @@
// Decomposing StringMake and lowering of StringPtr and StringLen
// happens in a later pass, dec, so that these operations are available
// to other passes for optimizations.
-(StringPtr (StringMake (Const64 [c]) _)) -> (Const64 [c])
+(StringPtr (StringMake (Addr {s} base) _)) -> (Addr {s} base)
(StringLen (StringMake _ (Const64 [c]))) -> (Const64 [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
(StringMake (ConstNil) (Const32 [0]))
@@ -1799,3 +1799,17 @@
(Zero {t1} [n] dst mem)))))
(StaticCall {sym} x) && needRaceCleanup(sym,v) -> x
+
+// Collapse moving A -> B -> C into just A -> C.
+// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
+// This happens most commonly when B is an autotmp inserted earlier
+// during compilation to ensure correctness.
+(Move {t1} [s1] dst tmp1 midmem:(Move {t2} [s2] tmp2 src _))
+ && s1 == s2
+ && t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ -> (Move {t1} [s1] dst src midmem)
+
+// Elide self-moves. This only happens rarely (e.g test/fixedbugs/bug277.go).
+// However, this rule is needed to prevent the previous rule from looping forever in such cases.
+(Move dst src mem) && isSamePtr(dst, src) -> mem
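
The Move-collapsing rule removes the middle hop of an A -> B -> C copy chain, which typically appears when the compiler materializes a call result in an autotmp before moving it to its destination. A hedged sketch of source that can produce the chain (whether a given version inserts the temporary depends on the frontend):

package p

type T struct{ a [4]int64 }

func f() T { return T{} }

var m = map[int]T{}

// store writes f's result into a map slot. The result may first be
// spilled to a compiler temporary and then moved into the slot
// returned by mapassign; the new rule rewrites the second Move to
// copy straight from the call result, and dead-store elimination can
// then drop the temporary's copy.
func store(k int) {
	m[k] = f()
}
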
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index ee9c6fa0f6..7ff6da1b01 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -55,6 +55,9 @@ var genericOps = []opData{
{name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
{name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32-> 64 unsigned multiply), returns (uint32(x), (uint32(x) != x))
+ {name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x))
+
// Weird special instructions for use in the strength reduction of divides.
// These ops compute unsigned (arg0 + arg1) / 2, correct to all
// 32/64 bits, even when the intermediate result of the add has 33/65 bits.
@@ -63,23 +66,26 @@ var genericOps = []opData{
{name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
{name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
+ // For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1
+ // or that the dividend is not the most negative value.
{name: "Div8", argLength: 2}, // arg0 / arg1, signed
{name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
- {name: "Div16", argLength: 2},
+ {name: "Div16", argLength: 2, aux: "Bool"},
{name: "Div16u", argLength: 2},
- {name: "Div32", argLength: 2},
+ {name: "Div32", argLength: 2, aux: "Bool"},
{name: "Div32u", argLength: 2},
- {name: "Div64", argLength: 2},
+ {name: "Div64", argLength: 2, aux: "Bool"},
{name: "Div64u", argLength: 2},
{name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+ // For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1.
{name: "Mod8", argLength: 2}, // arg0 % arg1, signed
{name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
- {name: "Mod16", argLength: 2},
+ {name: "Mod16", argLength: 2, aux: "Bool"},
{name: "Mod16u", argLength: 2},
- {name: "Mod32", argLength: 2},
+ {name: "Mod32", argLength: 2, aux: "Bool"},
{name: "Mod32u", argLength: 2},
- {name: "Mod64", argLength: 2},
+ {name: "Mod64", argLength: 2, aux: "Bool"},
{name: "Mod64u", argLength: 2},
{name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
@@ -506,20 +512,23 @@ var genericOps = []opData{
// Atomic loads return a new memory so that the loads are properly ordered
// with respect to other loads and stores.
// TODO: use for sync/atomic at some point.
- {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
- {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
- {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
- {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
- {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
- {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
- {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
- {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
- {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
- {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
- {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true iff store happens and new memory.
- {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true iff store happens and new memory.
- {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
- {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, returns true if store happens and new memory.
+ {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
// Atomic operation variants
// These variants have the same semantics as above atomic operations.
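
A hedged sketch of the lock hand-off pattern the new Acq/Rel variants above exist for; note Go's public sync/atomic is sequentially consistent, strictly stronger than what these ops require:

	package main

	import "sync/atomic"

	type spinLock struct{ state uint32 }

	func (l *spinLock) lock() {
		// The CAS plays the role of AtomicLoadAcq32/AtomicCompareAndSwap:
		// on success it must observe all writes published by unlock.
		for !atomic.CompareAndSwapUint32(&l.state, 0, 1) {
		}
	}

	func (l *spinLock) unlock() {
		// Plays the role of AtomicStoreRel32: publishes the critical
		// section's writes before the lock becomes visible as free.
		atomic.StoreUint32(&l.state, 0)
	}

	func main() {
		var l spinLock
		l.lock()
		l.unlock()
	}
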
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index b7d5f912db..499fda5af5 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -109,6 +109,12 @@ code, pre, .lines, .ast {
font-size: 12px;
}
+pre {
+ -moz-tab-size: 4;
+ -o-tab-size: 4;
+ tab-size: 4;
+}
+
.allow-x-scroll {
overflow-x: scroll;
}
diff --git a/src/cmd/compile/internal/ssa/layout.go b/src/cmd/compile/internal/ssa/layout.go
index 15e111ae7c..78d5dc77fe 100644
--- a/src/cmd/compile/internal/ssa/layout.go
+++ b/src/cmd/compile/internal/ssa/layout.go
@@ -12,7 +12,7 @@ func layout(f *Func) {
}
// Register allocation may use a different order which has constraints
-// imposed by the linear-scan algorithm. Note that that f.pass here is
+// imposed by the linear-scan algorithm. Note that f.pass here is
// regalloc, so the switch is conditional on -d=ssa/regalloc/test=N
func layoutRegallocOrder(f *Func) []*Block {
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index 815c4a5047..b2f5cae088 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -374,7 +374,7 @@ func TestNilcheckUser(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
@@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
- fuse(fun.f)
+ fusePlain(fun.f)
deadcode(fun.f)
CheckFunc(fun.f)
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 2145c6e723..1435caf26a 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -22,6 +22,8 @@ const (
Block386LE
Block386GT
Block386GE
+ Block386OS
+ Block386OC
Block386ULT
Block386ULE
Block386UGT
@@ -37,6 +39,8 @@ const (
BlockAMD64LE
BlockAMD64GT
BlockAMD64GE
+ BlockAMD64OS
+ BlockAMD64OC
BlockAMD64ULT
BlockAMD64ULE
BlockAMD64UGT
@@ -130,6 +134,8 @@ var blockString = [...]string{
Block386LE: "LE",
Block386GT: "GT",
Block386GE: "GE",
+ Block386OS: "OS",
+ Block386OC: "OC",
Block386ULT: "ULT",
Block386ULE: "ULE",
Block386UGT: "UGT",
@@ -145,6 +151,8 @@ var blockString = [...]string{
BlockAMD64LE: "LE",
BlockAMD64GT: "GT",
BlockAMD64GE: "GE",
+ BlockAMD64OS: "OS",
+ BlockAMD64OC: "OC",
BlockAMD64ULT: "ULT",
BlockAMD64ULE: "ULE",
BlockAMD64UGT: "UGT",
@@ -278,6 +286,7 @@ const (
Op386SBBLconst
Op386MULL
Op386MULLconst
+ Op386MULLU
Op386HMULL
Op386HMULLU
Op386MULLQU
@@ -339,6 +348,12 @@ const (
Op386ANDLload
Op386ORLload
Op386XORLload
+ Op386ADDLloadidx4
+ Op386SUBLloadidx4
+ Op386MULLloadidx4
+ Op386ANDLloadidx4
+ Op386ORLloadidx4
+ Op386XORLloadidx4
Op386NEGL
Op386NOTL
Op386BSFL
@@ -358,6 +373,7 @@ const (
Op386SETBE
Op386SETA
Op386SETAE
+ Op386SETO
Op386SETEQF
Op386SETNEF
Op386SETORD
@@ -394,10 +410,19 @@ const (
Op386ANDLmodify
Op386ORLmodify
Op386XORLmodify
+ Op386ADDLmodifyidx4
+ Op386SUBLmodifyidx4
+ Op386ANDLmodifyidx4
+ Op386ORLmodifyidx4
+ Op386XORLmodifyidx4
Op386ADDLconstmodify
Op386ANDLconstmodify
Op386ORLconstmodify
Op386XORLconstmodify
+ Op386ADDLconstmodifyidx4
+ Op386ANDLconstmodifyidx4
+ Op386ORLconstmodifyidx4
+ Op386XORLconstmodifyidx4
Op386MOVBloadidx1
Op386MOVWloadidx1
Op386MOVWloadidx2
@@ -485,6 +510,8 @@ const (
OpAMD64MULL
OpAMD64MULQconst
OpAMD64MULLconst
+ OpAMD64MULLU
+ OpAMD64MULQU
OpAMD64HMULQ
OpAMD64HMULL
OpAMD64HMULQU
@@ -690,6 +717,7 @@ const (
OpAMD64SETBE
OpAMD64SETA
OpAMD64SETAE
+ OpAMD64SETO
OpAMD64SETEQstore
OpAMD64SETNEstore
OpAMD64SETLstore
@@ -2068,6 +2096,8 @@ const (
OpHmul64u
OpMul32uhilo
OpMul64uhilo
+ OpMul32uover
+ OpMul64uover
OpAvg32u
OpAvg64u
OpDiv8
@@ -2379,15 +2409,18 @@ const (
OpAtomicLoad32
OpAtomicLoad64
OpAtomicLoadPtr
+ OpAtomicLoadAcq32
OpAtomicStore32
OpAtomicStore64
OpAtomicStorePtrNoWB
+ OpAtomicStoreRel32
OpAtomicExchange32
OpAtomicExchange64
OpAtomicAdd32
OpAtomicAdd64
OpAtomicCompareAndSwap32
OpAtomicCompareAndSwap64
+ OpAtomicCompareAndSwapRel32
OpAtomicAnd8
OpAtomicOr8
OpAtomicAdd32Variant
@@ -3099,6 +3132,24 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
{
name: "HMULL",
argLen: 2,
@@ -3168,6 +3219,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "DIVL",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVL,
@@ -3184,6 +3236,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "DIVW",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVW,
@@ -3232,6 +3285,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "MODL",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVL,
@@ -3248,6 +3302,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "MODW",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVW,
@@ -4019,6 +4074,126 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
{
name: "NEGL",
argLen: 1,
@@ -4243,6 +4418,16 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
{
name: "SETEQF",
argLen: 1,
@@ -4743,6 +4928,86 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
{
name: "ADDLconstmodify",
auxType: auxSymValAndOff,
@@ -4799,6 +5064,66 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
{
name: "MOVBloadidx1",
auxType: auxSymOff,
@@ -5996,6 +6321,42 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MULQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
{
name: "HMULQ",
argLen: 2,
@@ -6082,6 +6443,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "DIVQ",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVQ,
@@ -6098,6 +6460,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "DIVL",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVL,
@@ -6114,6 +6477,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "DIVW",
+ auxType: auxBool,
argLen: 2,
clobberFlags: true,
asm: x86.AIDIVW,
@@ -9018,6 +9382,16 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
{
name: "SETEQstore",
auxType: auxSymOff,
@@ -23078,6 +23452,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicStore32",
+ auxType: auxInt64,
argLen: 3,
faultOnNilArg0: true,
hasSideEffects: true,
@@ -23090,6 +23465,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicStore64",
+ auxType: auxInt64,
argLen: 3,
faultOnNilArg0: true,
hasSideEffects: true,
@@ -23102,6 +23478,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicLoad32",
+ auxType: auxInt64,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
@@ -23116,6 +23493,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicLoad64",
+ auxType: auxInt64,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
@@ -23130,6 +23508,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicLoadPtr",
+ auxType: auxInt64,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
@@ -23212,6 +23591,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicCas64",
+ auxType: auxInt64,
argLen: 4,
resultNotInArgs: true,
clobberFlags: true,
@@ -23230,6 +23610,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "LoweredAtomicCas32",
+ auxType: auxInt64,
argLen: 4,
resultNotInArgs: true,
clobberFlags: true,
@@ -27624,6 +28005,18 @@ var opcodeTable = [...]opInfo{
commutative: true,
generic: true,
},
+ {
+ name: "Mul32uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
{
name: "Avg32u",
argLen: 2,
@@ -27646,6 +28039,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "Div16",
+ auxType: auxBool,
argLen: 2,
generic: true,
},
@@ -27656,6 +28050,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "Div32",
+ auxType: auxBool,
argLen: 2,
generic: true,
},
@@ -27666,6 +28061,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "Div64",
+ auxType: auxBool,
argLen: 2,
generic: true,
},
@@ -27691,6 +28087,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "Mod16",
+ auxType: auxBool,
argLen: 2,
generic: true,
},
@@ -27701,6 +28098,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "Mod32",
+ auxType: auxBool,
argLen: 2,
generic: true,
},
@@ -27711,6 +28109,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "Mod64",
+ auxType: auxBool,
argLen: 2,
generic: true,
},
@@ -29312,6 +29711,11 @@ var opcodeTable = [...]opInfo{
argLen: 2,
generic: true,
},
+ {
+ name: "AtomicLoadAcq32",
+ argLen: 2,
+ generic: true,
+ },
{
name: "AtomicStore32",
argLen: 3,
@@ -29330,6 +29734,12 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
generic: true,
},
+ {
+ name: "AtomicStoreRel32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
{
name: "AtomicExchange32",
argLen: 3,
@@ -29366,6 +29776,12 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
generic: true,
},
+ {
+ name: "AtomicCompareAndSwapRel32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
{
name: "AtomicAnd8",
argLen: 3,
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index 37b607977c..0e0e2789b1 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -114,7 +114,7 @@ type posetNode struct {
// given that non-equality is not transitive, the only effect is that a later call
// to SetEqual for the same values will fail. NonEqual checks whether it is known that
// the nodes are different, either because SetNonEqual was called before, or because
-// we know that that they are strictly ordered.
+// we know that they are strictly ordered.
//
// It is implemented as a forest of DAGs; in each DAG, if node A dominates B,
// it means that A&lt;B.
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
+ case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64:
+ // On amd64 and 386 fix-up code can be avoided if we know
+ // the divisor is not -1 or the dividend > MinIntNN.
+ divr := v.Args[1]
+ divrLim, divrLimok := ft.limits[divr.ID]
+ divd := v.Args[0]
+ divdLim, divdLimok := ft.limits[divd.ID]
+ if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) ||
+ (divdLimok && divdLim.min > mostNegativeDividend[v.Op]) {
+ v.AuxInt = 1 // see NeedsFixUp in rewrite.go - v.AuxInt = 0 means we have not proved
+ // that the divisor is not -1 nor that the dividend is not the most negative value,
+ // so we need to add fix-up code.
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op)
+ }
+ }
}
}
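
The fix-up code being elided here exists because x86 IDIV faults on MinInt / -1, while the Go spec defines that quotient to wrap. A small sketch of the language-level behavior the fix-up preserves:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		x, y := int32(math.MinInt32), int32(-1)
		// Per the Go spec, the most negative value divided by -1 wraps
		// back to itself (remainder 0); a bare IDIVL would raise #DE
		// here, hence the fix-up code unless prove sets AuxInt = 1.
		fmt.Println(x / y) // -2147483648
		fmt.Println(x % y) // 0
	}
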
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index fd5f684eda..ae6af1c269 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -7,7 +7,9 @@ package ssa
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "cmd/internal/objabi"
"cmd/internal/src"
+ "encoding/binary"
"fmt"
"io"
"math"
@@ -449,6 +451,16 @@ func extend32Fto64F(f float32) float64 {
return math.Float64frombits(r)
}
+// NeedsFixUp reports whether the division needs fix-up code.
+func NeedsFixUp(v *Value) bool {
+ return v.AuxInt == 0
+}
+
+// i2f is used in rules for converting from an AuxInt to a float.
+func i2f(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+
// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 {
return int64(math.Float64bits(f))
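
A quick round-trip sketch of how a float64 constant survives inside an int64 AuxInt, using the same math calls as auxFrom64F and i2f above:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		aux := int64(math.Float64bits(3.75)) // what auxFrom64F stores
		f := math.Float64frombits(uint64(aux)) // what i2f recovers
		fmt.Println(f) // 3.75
	}
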
@@ -1090,3 +1102,45 @@ func needRaceCleanup(sym interface{}, v *Value) bool {
}
return true
}
+
+// symIsRO reports whether sym is a read-only global.
+func symIsRO(sym interface{}) bool {
+ lsym := sym.(*obj.LSym)
+ return lsym.Type == objabi.SRODATA && len(lsym.R) == 0
+}
+
+// read8 reads one byte from the read-only global sym at offset off.
+func read8(sym interface{}, off int64) uint8 {
+ lsym := sym.(*obj.LSym)
+ return lsym.P[off]
+}
+
+// read16 reads two bytes from the read-only global sym at offset off.
+func read16(sym interface{}, off int64, bigEndian bool) uint16 {
+ lsym := sym.(*obj.LSym)
+ if bigEndian {
+ return binary.BigEndian.Uint16(lsym.P[off:])
+ }
+ return binary.LittleEndian.Uint16(lsym.P[off:])
+}
+
+// read32 reads four bytes from the read-only global sym at offset off.
+func read32(sym interface{}, off int64, bigEndian bool) uint32 {
+ lsym := sym.(*obj.LSym)
+ if bigEndian {
+ return binary.BigEndian.Uint32(lsym.P[off:])
+ }
+ return binary.LittleEndian.Uint32(lsym.P[off:])
+}
+
+// read64 reads eight bytes from the read-only global sym at offset off.
+func read64(sym interface{}, off int64, bigEndian bool) uint64 {
+ lsym := sym.(*obj.LSym)
+ if bigEndian {
+ return binary.BigEndian.Uint64(lsym.P[off:])
+ }
+ return binary.LittleEndian.Uint64(lsym.P[off:])
+}
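
The bigEndian flag matters because the rules must fold loads in the target's byte order. A standalone sketch of the two decodings of the same bytes:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		p := []byte{0x34, 0x12}
		fmt.Printf("%#x\n", binary.LittleEndian.Uint16(p)) // 0x1234
		fmt.Printf("%#x\n", binary.BigEndian.Uint16(p))    // 0x3412
	}
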
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 5481b4e773..70aa51f3d1 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -25,10 +25,16 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386ADDLconst_0(v)
case Op386ADDLconstmodify:
return rewriteValue386_Op386ADDLconstmodify_0(v)
+ case Op386ADDLconstmodifyidx4:
+ return rewriteValue386_Op386ADDLconstmodifyidx4_0(v)
case Op386ADDLload:
return rewriteValue386_Op386ADDLload_0(v)
+ case Op386ADDLloadidx4:
+ return rewriteValue386_Op386ADDLloadidx4_0(v)
case Op386ADDLmodify:
return rewriteValue386_Op386ADDLmodify_0(v)
+ case Op386ADDLmodifyidx4:
+ return rewriteValue386_Op386ADDLmodifyidx4_0(v)
case Op386ADDSD:
return rewriteValue386_Op386ADDSD_0(v)
case Op386ADDSDload:
@@ -43,10 +49,16 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386ANDLconst_0(v)
case Op386ANDLconstmodify:
return rewriteValue386_Op386ANDLconstmodify_0(v)
+ case Op386ANDLconstmodifyidx4:
+ return rewriteValue386_Op386ANDLconstmodifyidx4_0(v)
case Op386ANDLload:
return rewriteValue386_Op386ANDLload_0(v)
+ case Op386ANDLloadidx4:
+ return rewriteValue386_Op386ANDLloadidx4_0(v)
case Op386ANDLmodify:
return rewriteValue386_Op386ANDLmodify_0(v)
+ case Op386ANDLmodifyidx4:
+ return rewriteValue386_Op386ANDLmodifyidx4_0(v)
case Op386CMPB:
return rewriteValue386_Op386CMPB_0(v)
case Op386CMPBconst:
@@ -94,13 +106,13 @@ func rewriteValue386(v *Value) bool {
case Op386MOVBloadidx1:
return rewriteValue386_Op386MOVBloadidx1_0(v)
case Op386MOVBstore:
- return rewriteValue386_Op386MOVBstore_0(v)
+ return rewriteValue386_Op386MOVBstore_0(v) || rewriteValue386_Op386MOVBstore_10(v)
case Op386MOVBstoreconst:
return rewriteValue386_Op386MOVBstoreconst_0(v)
case Op386MOVBstoreconstidx1:
return rewriteValue386_Op386MOVBstoreconstidx1_0(v)
case Op386MOVBstoreidx1:
- return rewriteValue386_Op386MOVBstoreidx1_0(v) || rewriteValue386_Op386MOVBstoreidx1_10(v)
+ return rewriteValue386_Op386MOVBstoreidx1_0(v) || rewriteValue386_Op386MOVBstoreidx1_10(v) || rewriteValue386_Op386MOVBstoreidx1_20(v)
case Op386MOVLload:
return rewriteValue386_Op386MOVLload_0(v)
case Op386MOVLloadidx1:
@@ -118,7 +130,7 @@ func rewriteValue386(v *Value) bool {
case Op386MOVLstoreidx1:
return rewriteValue386_Op386MOVLstoreidx1_0(v)
case Op386MOVLstoreidx4:
- return rewriteValue386_Op386MOVLstoreidx4_0(v)
+ return rewriteValue386_Op386MOVLstoreidx4_0(v) || rewriteValue386_Op386MOVLstoreidx4_10(v)
case Op386MOVSDconst:
return rewriteValue386_Op386MOVSDconst_0(v)
case Op386MOVSDload:
@@ -177,6 +189,8 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386MULLconst_0(v) || rewriteValue386_Op386MULLconst_10(v) || rewriteValue386_Op386MULLconst_20(v) || rewriteValue386_Op386MULLconst_30(v)
case Op386MULLload:
return rewriteValue386_Op386MULLload_0(v)
+ case Op386MULLloadidx4:
+ return rewriteValue386_Op386MULLloadidx4_0(v)
case Op386MULSD:
return rewriteValue386_Op386MULSD_0(v)
case Op386MULSDload:
@@ -195,10 +209,16 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386ORLconst_0(v)
case Op386ORLconstmodify:
return rewriteValue386_Op386ORLconstmodify_0(v)
+ case Op386ORLconstmodifyidx4:
+ return rewriteValue386_Op386ORLconstmodifyidx4_0(v)
case Op386ORLload:
return rewriteValue386_Op386ORLload_0(v)
+ case Op386ORLloadidx4:
+ return rewriteValue386_Op386ORLloadidx4_0(v)
case Op386ORLmodify:
return rewriteValue386_Op386ORLmodify_0(v)
+ case Op386ORLmodifyidx4:
+ return rewriteValue386_Op386ORLmodifyidx4_0(v)
case Op386ROLBconst:
return rewriteValue386_Op386ROLBconst_0(v)
case Op386ROLLconst:
@@ -265,8 +285,12 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386SUBLconst_0(v)
case Op386SUBLload:
return rewriteValue386_Op386SUBLload_0(v)
+ case Op386SUBLloadidx4:
+ return rewriteValue386_Op386SUBLloadidx4_0(v)
case Op386SUBLmodify:
return rewriteValue386_Op386SUBLmodify_0(v)
+ case Op386SUBLmodifyidx4:
+ return rewriteValue386_Op386SUBLmodifyidx4_0(v)
case Op386SUBSD:
return rewriteValue386_Op386SUBSD_0(v)
case Op386SUBSDload:
@@ -281,10 +305,16 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386XORLconst_0(v)
case Op386XORLconstmodify:
return rewriteValue386_Op386XORLconstmodify_0(v)
+ case Op386XORLconstmodifyidx4:
+ return rewriteValue386_Op386XORLconstmodifyidx4_0(v)
case Op386XORLload:
return rewriteValue386_Op386XORLload_0(v)
+ case Op386XORLloadidx4:
+ return rewriteValue386_Op386XORLloadidx4_0(v)
case Op386XORLmodify:
return rewriteValue386_Op386XORLmodify_0(v)
+ case Op386XORLmodifyidx4:
+ return rewriteValue386_Op386XORLmodifyidx4_0(v)
case OpAdd16:
return rewriteValue386_OpAdd16_0(v)
case OpAdd32:
@@ -607,6 +637,10 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_OpRsh8x64_0(v)
case OpRsh8x8:
return rewriteValue386_OpRsh8x8_0(v)
+ case OpSelect0:
+ return rewriteValue386_OpSelect0_0(v)
+ case OpSelect1:
+ return rewriteValue386_OpSelect1_0(v)
case OpSignExt16to32:
return rewriteValue386_OpSignExt16to32_0(v)
case OpSignExt8to16:
@@ -1314,6 +1348,62 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (ADDLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ADDLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (ADDLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ADDLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
// match: (ADDL x (NEGL y))
// cond:
// result: (SUBL x y)
@@ -1621,6 +1711,92 @@ func rewriteValue386_Op386ADDLconstmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ADDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2*4)
+ // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386ADDLload_0(v *Value) bool {
b := v.Block
_ = b
@@ -1679,6 +1855,128 @@ func rewriteValue386_Op386ADDLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (ADDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ ptr := v_1.Args[0]
+ idx := v_1.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386ADDLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ADDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ADDLloadidx4 [off1+off2] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386ADDLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (ADDLloadidx4 [off1+off2*4] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ base := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_2.AuxInt
+ idx := v_2.Args[0]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386ADDLloadidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386ADDLmodify_0(v *Value) bool {
@@ -1741,6 +2039,124 @@ func rewriteValue386_Op386ADDLmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ADDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ADDLmodifyidx4 [off1+off2] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386ADDLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (ADDLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386ADDLmodifyidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
+ // cond: validValAndOff(c,off)
+ // result: (ADDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386MOVLconst {
+ break
+ }
+ c := v_2.AuxInt
+ mem := v.Args[3]
+ if !(validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
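
For orientation, a hedged sketch of the Go source shape the new 386 *modifyidx4 ops target: a read-modify-write of a 32-bit slice element can become one memory-operand instruction with a scaled index instead of a load/op/store triple.

	package demo

	// a[i] += v is the canonical ADDLmodifyidx4 candidate on 386:
	// ADDL v, (ptr)(idx*4) reads, adds, and writes back in one instruction.
	func addTo(a []int32, i int, v int32) {
		a[i] += v
	}
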
func rewriteValue386_Op386ADDSD_0(v *Value) bool {
b := v.Block
_ = b
@@ -2064,6 +2480,62 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (ANDLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ANDLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (ANDLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ANDLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
// match: (ANDL x x)
// cond:
// result: x
@@ -2195,6 +2667,92 @@ func rewriteValue386_Op386ANDLconstmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2*4)
+ // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386ANDLload_0(v *Value) bool {
b := v.Block
_ = b
@@ -2253,6 +2811,128 @@ func rewriteValue386_Op386ANDLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (ANDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ ptr := v_1.Args[0]
+ idx := v_1.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386ANDLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ANDLloadidx4 [off1+off2] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386ANDLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (ANDLloadidx4 [off1+off2*4] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ base := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_2.AuxInt
+ idx := v_2.Args[0]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386ANDLloadidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386ANDLmodify_0(v *Value) bool {
@@ -2315,6 +2995,124 @@ func rewriteValue386_Op386ANDLmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ANDLmodifyidx4 [off1+off2] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386ANDLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (ANDLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386ANDLmodifyidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
+ // cond: validValAndOff(c,off)
+ // result: (ANDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386MOVLconst {
+ break
+ }
+ c := v_2.AuxInt
+ mem := v.Args[3]
+ if !(validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386CMPB_0(v *Value) bool {
b := v.Block
_ = b
@@ -4448,6 +5246,24 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(read8(sym, off))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(read8(sym, off))
+ return true
+ }
return false
}
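
The new MOVBload rule above, together with symIsRO/read8, folds byte loads at constant offsets from read-only data. A hedged sketch of code that can benefit once earlier passes (for example inlining) reduce the index to a constant:

	package demo

	const hex = "0123456789abcdef"

	// After inlining nibble(10), the byte load from hex's read-only data
	// symbol has a constant offset and can fold to a MOVLconst of 'a'.
	func nibble(i int) byte {
		return hex[i&0xf]
	}

	func tenth() byte {
		return nibble(10)
	}
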
func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool {
@@ -4729,6 +5545,51 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHRWconst {
+ break
+ }
+ if v_1.AuxInt != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v.Args[2]
+ if x.Op != Op386MOVBstore {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ if w != x.Args[1] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
@@ -4774,6 +5635,99 @@ func rewriteValue386_Op386MOVBstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != Op386MOVBstore {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRWconst {
+ break
+ }
+ if x_1.AuxInt != 8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstore_10(v *Value) bool {
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != Op386MOVBstore {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRLconst {
+ break
+ }
+ if x_1.AuxInt != 8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)
@@ -4962,6 +5916,37 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ for {
+ a := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[1]
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
return false
}
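
A hedged sketch of the store-merging pattern the MOVBstore and MOVBstoreconst rules above recognize: adjacent byte stores of the low and high halves of a 16-bit value collapse into a single word store.

	package demo

	// b[0]/b[1] lower to MOVBstore [i] p w and MOVBstore [i+1] p
	// (SHRWconst [8] w); the rules rewrite the pair into one MOVWstore.
	func put16(b []byte, v uint16) {
		b[0] = byte(v)
		b[1] = byte(v >> 8)
	}
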
func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool {
@@ -5350,6 +6335,612 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRWconst {
+ break
+ }
+ if v_2.AuxInt != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRWconst {
+ break
+ }
+ if v_2.AuxInt != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if idx != x.Args[0] {
+ break
+ }
+ if p != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool {
+ // match: (MOVBstoreidx1 [i] {s} idx p (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ idx := v.Args[0]
+ p := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRWconst {
+ break
+ }
+ if v_2.AuxInt != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} idx p (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ idx := v.Args[0]
+ p := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRWconst {
+ break
+ }
+ if v_2.AuxInt != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if idx != x.Args[0] {
+ break
+ }
+ if p != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ p := v.Args[0]
+ idx := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRLconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ p := v.Args[0]
+ idx := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if idx != x.Args[0] {
+ break
+ }
+ if p != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRLconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ idx := v.Args[0]
+ p := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRLconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ idx := v.Args[0]
+ p := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if idx != x.Args[0] {
+ break
+ }
+ if p != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRLconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ p := v.Args[0]
+ idx := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRWconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ p := v.Args[0]
+ idx := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if idx != x.Args[0] {
+ break
+ }
+ if p != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRWconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ idx := v.Args[0]
+ p := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRWconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[3]
+ idx := v.Args[0]
+ p := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[3]
+ if idx != x.Args[0] {
+ break
+ }
+ if p != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != Op386SHRWconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstoreidx1_20(v *Value) bool {
// match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
@@ -5460,9 +7051,6 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool {
// match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
@@ -5736,6 +7324,24 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
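+	// Constant-fold loads from read-only data: an SB-relative address whose
+	// symbol is read-only lets the rewriter read the four bytes at compile time.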
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(int32(read32(sym, off, config.BigEndian)))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(int32(read32(sym, off, config.BigEndian)))
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool {
@@ -7279,6 +8885,797 @@ func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLloadidx4 x [off] {sym} ptr idx mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ADDLloadidx4 {
+ break
+ }
+ if y.AuxInt != off {
+ break
+ }
+ if y.Aux != sym {
+ break
+ }
+ _ = y.Args[3]
+ x := y.Args[0]
+ if ptr != y.Args[1] {
+ break
+ }
+ if idx != y.Args[2] {
+ break
+ }
+ mem := y.Args[3]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ADDLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLloadidx4 x [off] {sym} ptr idx mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ANDLloadidx4 {
+ break
+ }
+ if y.AuxInt != off {
+ break
+ }
+ if y.Aux != sym {
+ break
+ }
+ _ = y.Args[3]
+ x := y.Args[0]
+ if ptr != y.Args[1] {
+ break
+ }
+ if idx != y.Args[2] {
+ break
+ }
+ mem := y.Args[3]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ANDLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLloadidx4 x [off] {sym} ptr idx mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ORLloadidx4 {
+ break
+ }
+ if y.AuxInt != off {
+ break
+ }
+ if y.Aux != sym {
+ break
+ }
+ _ = y.Args[3]
+ x := y.Args[0]
+ if ptr != y.Args[1] {
+ break
+ }
+ if idx != y.Args[2] {
+ break
+ }
+ mem := y.Args[3]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ORLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLloadidx4 x [off] {sym} ptr idx mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386XORLloadidx4 {
+ break
+ }
+ if y.AuxInt != off {
+ break
+ }
+ if y.Aux != sym {
+ break
+ }
+ _ = y.Args[3]
+ x := y.Args[0]
+ if ptr != y.Args[1] {
+ break
+ }
+ if idx != y.Args[2] {
+ break
+ }
+ mem := y.Args[3]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386XORLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ADDL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ x := y.Args[1]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ADDLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ADDL {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ADDLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(SUBL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (SUBLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386SUBL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ x := y.Args[1]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ANDL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ x := y.Args[1]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ANDLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
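+// The MOVLstoreidx4 rules in this chunk and the next fuse a load/op/store
+// sequence on the same indexed address into one read-modify-write
+// instruction, provided the intermediate op and load each have a single use
+// and can be clobbered.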
+func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool {
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ANDL {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ANDLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ORL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ x := y.Args[1]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ORLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ORL {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ORLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386XORL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ x := y.Args[1]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386XORLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386XORL {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(Op386XORLmodifyidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off)
+ // result: (ADDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ADDLconst {
+ break
+ }
+ c := y.AuxInt
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off)
+ // result: (ANDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ANDLconst {
+ break
+ }
+ c := y.AuxInt
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off)
+ // result: (ORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386ORLconst {
+ break
+ }
+ c := y.AuxInt
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386ORLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off)
+ // result: (XORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ y := v.Args[2]
+ if y.Op != Op386XORLconst {
+ break
+ }
+ c := y.AuxInt
+ l := y.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[2]
+ if ptr != l.Args[0] {
+ break
+ }
+ if idx != l.Args[1] {
+ break
+ }
+ mem := l.Args[2]
+ if mem != v.Args[3] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386XORLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVSDconst_0(v *Value) bool {
@@ -8674,6 +11071,24 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(read16(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVWloadidx1_0(v *Value) bool {
@@ -9345,6 +11760,37 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool {
v.AddArg(mem)
return true
}
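+	// Merge two adjacent 16-bit constant stores into one 32-bit store: this
+	// store supplies the low half and the store at offset+2 the high half.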
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ for {
+ a := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[1]
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool {
@@ -10378,6 +12824,62 @@ func rewriteValue386_Op386MULL_0(v *Value) bool {
v.AddArg(mem)
return true
}
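+	// MULL is commutative, so a MOVLloadidx4 in either operand position can
+	// merge into MULLloadidx4 when canMergeLoad says the load may move.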
+ // match: (MULL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (MULLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386MULLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MULL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (MULLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386MULLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386MULLconst_0(v *Value) bool {
@@ -10943,6 +13445,128 @@ func rewriteValue386_Op386MULLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MULLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MULLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ ptr := v_1.Args[0]
+ idx := v_1.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MULLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
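+// Address folding for the indexed form: constant adjustments to the base or
+// the index (the latter scaled by 4) are absorbed into AuxInt, and a LEAL
+// base merges when the combined offset fits and the symbols are compatible.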
+func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (MULLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MULLloadidx4 [off1+off2] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MULLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MULLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (MULLloadidx4 [off1+off2*4] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ base := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_2.AuxInt
+ idx := v_2.Args[0]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386MULLloadidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MULLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386MULSD_0(v *Value) bool {
@@ -11473,6 +14097,62 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (ORLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ORLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (ORLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386ORLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
// match: (ORL x x)
// cond:
// result: x
@@ -11943,6 +14623,11 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValue386_Op386ORL_20(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
// cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem)
@@ -12051,11 +14736,6 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValue386_Op386ORL_20(v *Value) bool {
- b := v.Block
- _ = b
// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
// cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem)
@@ -12578,6 +15258,11 @@ func rewriteValue386_Op386ORL_20(v *Value) bool {
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValue386_Op386ORL_30(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -12746,11 +15431,6 @@ func rewriteValue386_Op386ORL_20(v *Value) bool {
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValue386_Op386ORL_30(v *Value) bool {
- b := v.Block
- _ = b
// match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -13423,6 +16103,11 @@ func rewriteValue386_Op386ORL_30(v *Value) bool {
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValue386_Op386ORL_40(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -13591,11 +16276,6 @@ func rewriteValue386_Op386ORL_30(v *Value) bool {
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValue386_Op386ORL_40(v *Value) bool {
- b := v.Block
- _ = b
// match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -14268,6 +16948,11 @@ func rewriteValue386_Op386ORL_40(v *Value) bool {
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValue386_Op386ORL_50(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -14436,11 +17121,6 @@ func rewriteValue386_Op386ORL_40(v *Value) bool {
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValue386_Op386ORL_50(v *Value) bool {
- b := v.Block
- _ = b
// match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -15130,6 +17810,92 @@ func rewriteValue386_Op386ORLconstmodify_0(v *Value) bool {
}
return false
}
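+// The constmodifyidx4 ops carry a ValAndOff AuxInt packing both the constant
+// value and the offset, so offset folding must go through canAdd/add rather
+// than plain integer addition.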
+func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(Op386ORLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2*4)
+ // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
+ break
+ }
+ v.reset(Op386ORLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386ORLload_0(v *Value) bool {
b := v.Block
_ = b
@@ -15188,6 +17954,128 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (ORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ ptr := v_1.Args[0]
+ idx := v_1.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386ORLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ORLloadidx4 [off1+off2] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386ORLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (ORLloadidx4 [off1+off2*4] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ base := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_2.AuxInt
+ idx := v_2.Args[0]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386ORLloadidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386ORLmodify_0(v *Value) bool {
@@ -15250,6 +18138,124 @@ func rewriteValue386_Op386ORLmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ORLmodifyidx4 [off1+off2] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386ORLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (ORLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386ORLmodifyidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
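+	// A constant operand converts the read-modify-write into its constant
+	// form, packing value and offset into a single ValAndOff AuxInt.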
+ // match: (ORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
+ // cond: validValAndOff(c,off)
+ // result: (ORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386MOVLconst {
+ break
+ }
+ c := v_2.AuxInt
+ mem := v.Args[3]
+ if !(validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386ORLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386ROLBconst_0(v *Value) bool {
// match: (ROLBconst [c] (ROLBconst [d] x))
// cond:
@@ -16637,6 +19643,34 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool {
v.AddArg(mem)
return true
}
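+	// SUBL is not commutative, so unlike ADDL/ORL/XORL only a load in the
+	// second (subtrahend) position merges into SUBLloadidx4.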
+ // match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (SUBLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
// match: (SUBL x x)
// cond:
// result: (MOVLconst [0])
@@ -16756,6 +19790,128 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (SUBLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ ptr := v_1.Args[0]
+ idx := v_1.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386SUBLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SUBLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBLloadidx4 [off1+off2] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386SUBLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (SUBLloadidx4 [off1+off2*4] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ base := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_2.AuxInt
+ idx := v_2.Args[0]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386SUBLloadidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386SUBLmodify_0(v *Value) bool {
@@ -16818,6 +19974,124 @@ func rewriteValue386_Op386SUBLmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SUBLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBLmodifyidx4 [off1+off2] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386SUBLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (SUBLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386SUBLmodifyidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
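+	// There is no SUBLconstmodifyidx4; subtracting a constant is instead
+	// rewritten as adding its negation, when -c fits in a ValAndOff.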
+ // match: (SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
+ // cond: validValAndOff(-c,off)
+ // result: (ADDLconstmodifyidx4 [makeValAndOff(-c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386MOVLconst {
+ break
+ }
+ c := v_2.AuxInt
+ mem := v.Args[3]
+ if !(validValAndOff(-c, off)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(-c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386SUBSD_0(v *Value) bool {
b := v.Block
_ = b
@@ -17258,6 +20532,62 @@ func rewriteValue386_Op386XORL_0(v *Value) bool {
return false
}
func rewriteValue386_Op386XORL_10(v *Value) bool {
+ // match: (XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (XORLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386XORLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (XORLloadidx4 x [off] {sym} ptr idx mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != Op386MOVLloadidx4 {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[2]
+ ptr := l.Args[0]
+ idx := l.Args[1]
+ mem := l.Args[2]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386XORLloadidx4)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
// match: (XORL x x)
// cond:
// result: (MOVLconst [0])
@@ -17376,6 +20706,92 @@ func rewriteValue386_Op386XORLconstmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (XORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(Op386XORLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2*4)
+ // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
+ break
+ }
+ v.reset(Op386XORLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLconstmodifyidx4)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386XORLload_0(v *Value) bool {
b := v.Block
_ = b
@@ -17434,6 +20850,128 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (XORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (XORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ ptr := v_1.Args[0]
+ idx := v_1.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386XORLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (XORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XORLloadidx4 [off1+off2] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386XORLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (XORLloadidx4 [off1+off2*4] {sym} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ base := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_2.AuxInt
+ idx := v_2.Args[0]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386XORLloadidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ idx := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386XORLmodify_0(v *Value) bool {
@@ -17496,6 +21034,124 @@ func rewriteValue386_Op386XORLmodify_0(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (XORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XORLmodifyidx4 [off1+off2] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386XORLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
+ // cond: is32Bit(off1+off2*4)
+ // result: (XORLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ base := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2*4)) {
+ break
+ }
+ v.reset(Op386XORLmodifyidx4)
+ v.AuxInt = off1 + off2*4
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLmodifyidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
+ // cond: validValAndOff(c,off)
+ // result: (XORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386MOVLconst {
+ break
+ }
+ c := v_2.AuxInt
+ mem := v.Args[3]
+ if !(validValAndOff(c, off)) {
+ break
+ }
+ v.reset(Op386XORLconstmodifyidx4)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_OpAdd16_0(v *Value) bool {
// match: (Add16 x y)
// cond:
@@ -17898,14 +21554,16 @@ func rewriteValue386_OpCvt64Fto32F_0(v *Value) bool {
}
}
func rewriteValue386_OpDiv16_0(v *Value) bool {
- // match: (Div16 x y)
+ // match: (Div16 [a] x y)
// cond:
- // result: (DIVW x y)
+ // result: (DIVW [a] x y)
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVW)
+ v.AuxInt = a
v.AddArg(x)
v.AddArg(y)
return true
@@ -17926,14 +21584,16 @@ func rewriteValue386_OpDiv16u_0(v *Value) bool {
}
}
func rewriteValue386_OpDiv32_0(v *Value) bool {
- // match: (Div32 x y)
+ // match: (Div32 [a] x y)
// cond:
- // result: (DIVL x y)
+ // result: (DIVL [a] x y)
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVL)
+ v.AuxInt = a
v.AddArg(x)
v.AddArg(y)
return true
@@ -19301,14 +22961,16 @@ func rewriteValue386_OpLsh8x8_0(v *Value) bool {
}
}
func rewriteValue386_OpMod16_0(v *Value) bool {
- // match: (Mod16 x y)
+ // match: (Mod16 [a] x y)
// cond:
- // result: (MODW x y)
+ // result: (MODW [a] x y)
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODW)
+ v.AuxInt = a
v.AddArg(x)
v.AddArg(y)
return true
@@ -19329,14 +22991,16 @@ func rewriteValue386_OpMod16u_0(v *Value) bool {
}
}
func rewriteValue386_OpMod32_0(v *Value) bool {
- // match: (Mod32 x y)
+ // match: (Mod32 [a] x y)
// cond:
- // result: (MODL x y)
+ // result: (MODL [a] x y)
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODL)
+ v.AuxInt = a
v.AddArg(x)
v.AddArg(y)
return true
@@ -20858,6 +24522,59 @@ func rewriteValue386_OpRsh8x8_0(v *Value) bool {
return true
}
}
+func rewriteValue386_OpSelect0_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (Select0 (Mul32uover x y))
+ // cond:
+ // result: (Select0 (MULLU x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSelect1_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (Select1 (Mul32uover x y))
+ // cond:
+ // result: (SETO (Select1 (MULLU x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(Op386SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
func rewriteValue386_OpSignExt16to32_0(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
@@ -21996,6 +25713,20 @@ func rewriteBlock386(b *Block) bool {
b.Aux = nil
return true
}
+ // match: (If (SETO cmp) yes no)
+ // cond:
+ // result: (OS cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETO {
+ break
+ }
+ cmp := v.Args[0]
+ b.Kind = Block386OS
+ b.SetControl(cmp)
+ b.Aux = nil
+ return true
+ }
// match: (If (SETGF cmp) yes no)
// cond:
// result: (UGT cmp yes no)
@@ -22753,6 +26484,58 @@ func rewriteBlock386(b *Block) bool {
b.Aux = nil
return true
}
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // cond:
+ // result: (OS cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETO {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETO {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ b.Kind = Block386OS
+ b.SetControl(cmp)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // cond:
+ // result: (OS cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETO {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETO {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ b.Kind = Block386OS
+ b.SetControl(cmp)
+ b.Aux = nil
+ return true
+ }
// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
// cond:
// result: (UGT cmp yes no)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 89fc6780b9..09d17e00c8 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -248,7 +248,7 @@ func rewriteValueAMD64(v *Value) bool {
case OpAMD64MOVBloadidx1:
return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
case OpAMD64MOVBstore:
- return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
+ return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
case OpAMD64MOVBstoreconst:
return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
case OpAMD64MOVBstoreconstidx1:
@@ -268,7 +268,7 @@ func rewriteValueAMD64(v *Value) bool {
case OpAMD64MOVLi2f:
return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
case OpAMD64MOVLload:
- return rewriteValueAMD64_OpAMD64MOVLload_0(v)
+ return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
case OpAMD64MOVLloadidx1:
return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
case OpAMD64MOVLloadidx4:
@@ -12446,6 +12446,24 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(read8(sym, off))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int64(read8(sym, off))
+ return true
+ }
return false
}
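
For context on the `symIsRO` rules added throughout this file (and the analogous ARM/ARM64 hunks later in this diff): a load at a fixed offset from the static base (SB) of a read-only symbol can be resolved at compile time with `read8`/`read16`/`read32`/`read64`, which honor `config.BigEndian`. Indexing a constant string is the typical producer. A minimal sketch of the kind of Go code this folds:

```go
package rodata

// Indexing a constant string is not itself a Go constant expression, so
// it compiles to a byte load from the string's read-only data symbol;
// with the symIsRO rule above, that load is expected to fold to a
// MOVLconst of 'e' at compile time.
const greeting = "hello"

func secondByte() byte {
	return greeting[1]
}
```
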
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
@@ -12953,6 +12971,30 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = makeValAndOff(int64(int8(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@@ -13181,6 +13223,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
// result: (MOVQstore [i-7] {s} p (BSWAPQ w) mem)
@@ -13372,13 +13421,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
@@ -13514,6 +13556,141 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != OpAMD64MOVBstore {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRWconst {
+ break
+ }
+ if x_1.AuxInt != 8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != OpAMD64MOVBstore {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if x_1.AuxInt != 8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != OpAMD64MOVBstore {
+ break
+ }
+ if x.AuxInt != i+1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if x_1.AuxInt != 8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)
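
A note on the three new matches just above: they handle the increasing-offset direction of store combining (the pre-existing rules only matched a store at `[i]` paired with one at `[i-1]`). A byte store at `i` followed by a store of `w>>8` at `i+1` is a little-endian 16-bit store. A hedged sketch of the source pattern this targets:

```go
package lestore

// Two dependent byte stores at offsets 0 and 1 collapse into a single
// MOVWstore, roughly the shape that inlined
// encoding/binary.LittleEndian.PutUint16 produces.
func putUint16(b []byte, v uint16) {
	b[0] = byte(v)      // MOVBstore [0] ... w
	b[1] = byte(v >> 8) // MOVBstore [1] ... (SHR*const [8] w) -- merged away
}
```
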
@@ -13681,6 +13858,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool {
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@@ -13868,6 +14048,37 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ for {
+ a := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64MOVBstoreconst {
+ break
+ }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[1]
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
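
The constant-store variant of the same idea: two `MOVBstoreconst` ops at adjacent offsets merge into one `MOVWstoreconst`, with the store at the lower offset supplying the low byte. A worked example of the `ValAndOff` value packing, under the little-endian layout these rules assume:

```go
package main

import "fmt"

// packWord reproduces the value computation in the rewrite above: the
// constant stored at ValAndOff(a).Off() becomes the low byte, and the
// one stored at Off()+1 becomes the high byte.
func packWord(lo, hi int64) int64 {
	return lo&0xff | hi<<8
}

func main() {
	fmt.Printf("%#x\n", packWord(0x11, 0x22)) // 0x2211
}
```
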
@@ -15449,6 +15660,31 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read32(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64(read32(sym, off, config.BigEndian))
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond:
@@ -15957,6 +16193,30 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = makeValAndOff(int64(int32(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@@ -16102,6 +16362,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVQstore [i-4] {s} p w mem)
@@ -16147,13 +16414,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVQstore [i-4] {s} p w0 mem)
@@ -16519,6 +16779,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
// match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
// result: (ADDLmodify [off] {sym} ptr x mem)
@@ -16562,9 +16825,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
// result: (SUBLmodify [off] {sym} ptr x mem)
@@ -16952,6 +17212,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
// result: (BTSLmodify [off] {sym} ptr x mem)
@@ -16995,9 +17258,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
@@ -17462,6 +17722,40 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+ for {
+ a := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64MOVLstoreconst {
+ break
+ }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[1]
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = ValAndOff(a).Off()
+ v.Aux = s
+ v.AddArg(p)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
@@ -18481,6 +18775,10 @@ func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
return false
}
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
@@ -18713,6 +19011,24 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
v.AddArg(val)
return true
}
+ // match: (MOVQload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read64(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64(read64(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
@@ -22529,6 +22845,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
return false
}
func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWQZX x)
@@ -22733,6 +23053,24 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int64(read16(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
@@ -23114,6 +23452,30 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = makeValAndOff(int64(int16(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@@ -23274,6 +23636,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w mem)
@@ -23319,13 +23688,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w0 mem)
@@ -23708,6 +24070,37 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ for {
+ a := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64MOVWstoreconst {
+ break
+ }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ _ = x.Args[1]
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
@@ -59398,15 +59791,17 @@ func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (Div16 x y)
+ // match: (Div16 [a] x y)
// cond:
- // result: (Select0 (DIVW x y))
+ // result: (Select0 (DIVW [a] x y))
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -59438,15 +59833,17 @@ func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (Div32 x y)
+ // match: (Div32 [a] x y)
// cond:
- // result: (Select0 (DIVL x y))
+ // result: (Select0 (DIVL [a] x y))
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -59492,15 +59889,17 @@ func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (Div64 x y)
+ // match: (Div64 [a] x y)
// cond:
- // result: (Select0 (DIVQ x y))
+ // result: (Select0 (DIVQ [a] x y))
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -61578,15 +61977,17 @@ func rewriteValueAMD64_OpMod16_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (Mod16 x y)
+ // match: (Mod16 [a] x y)
// cond:
- // result: (Select1 (DIVW x y))
+ // result: (Select1 (DIVW [a] x y))
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -61618,15 +62019,17 @@ func rewriteValueAMD64_OpMod32_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (Mod32 x y)
+ // match: (Mod32 [a] x y)
// cond:
- // result: (Select1 (DIVL x y))
+ // result: (Select1 (DIVL [a] x y))
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -61658,15 +62061,17 @@ func rewriteValueAMD64_OpMod64_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (Mod64 x y)
+ // match: (Mod64 [a] x y)
// cond:
- // result: (Select1 (DIVQ x y))
+ // result: (Select1 (DIVQ [a] x y))
for {
+ a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -64393,6 +64798,46 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
b := v.Block
_ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (Select0 (Mul64uover x y))
+ // cond:
+ // result: (Select0 (MULQU x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Mul32uover x y))
+ // cond:
+ // result: (Select0 (MULLU x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
// match: (Select0 (AddTupleFirst32 val tuple))
// cond:
// result: (ADDL val (Select0 tuple))
@@ -64434,6 +64879,50 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (Select1 (Mul64uover x y))
+ // cond:
+ // result: (SETO (Select1 (MULQU x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Mul32uover x y))
+ // cond:
+ // result: (SETO (Select1 (MULLU x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
// match: (Select1 (AddTupleFirst32 _ tuple))
// cond:
// result: (Select1 tuple)
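
What the `Mul64uover`/`Mul32uover` rules above lower (the 386 hunks earlier in this diff add the 32-bit-only version via `MULLU`): the compiler models an overflow-checked unsigned multiply as a tuple, where `Select0` is the low half of the product and `Select1` is the overflow bit, materialized from the CPU's overflow flag by `SETO`. A minimal sketch of the semantics, assuming nothing beyond `math/bits`:

```go
package main

import (
	"fmt"
	"math/bits"
)

// mul32Overflow mirrors what the Mul32uover rules compute once lowered:
// the first result is the low 32 bits of x*y (the MULLU output) and the
// second is the overflow flag that SETO reads off the hardware OF bit.
func mul32Overflow(x, y uint32) (uint32, bool) {
	hi, lo := bits.Mul32(x, y)
	return lo, hi != 0 // overflow iff the high half is nonzero
}

func main() {
	fmt.Println(mul32Overflow(1<<16, 1<<16)) // 0 true
	fmt.Println(mul32Overflow(6, 7))         // 42 false
}
```
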
@@ -66598,6 +67087,20 @@ func rewriteBlockAMD64(b *Block) bool {
b.Aux = nil
return true
}
+ // match: (If (SETO cmp) yes no)
+ // cond:
+ // result: (OS cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpAMD64SETO {
+ break
+ }
+ cmp := v.Args[0]
+ b.Kind = BlockAMD64OS
+ b.SetControl(cmp)
+ b.Aux = nil
+ return true
+ }
// match: (If (SETGF cmp) yes no)
// cond:
// result: (UGT cmp yes no)
@@ -67355,6 +67858,58 @@ func rewriteBlockAMD64(b *Block) bool {
b.Aux = nil
return true
}
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // cond:
+ // result: (OS cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTB {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETO {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SETO {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ b.Kind = BlockAMD64OS
+ b.SetControl(cmp)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // cond:
+ // result: (OS cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTB {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETO {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SETO {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ b.Kind = BlockAMD64OS
+ b.SetControl(cmp)
+ b.Aux = nil
+ return true
+ }
// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
// cond: !config.nacl
// result: (ULT (BTL x y))
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 966413ab25..4f6f61544e 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -6883,6 +6883,24 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int64(read8(sym, off))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(read8(sym, off))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool {
@@ -7953,6 +7971,24 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int64(read16(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(read16(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool {
@@ -8797,6 +8833,24 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int64(int32(read32(sym, off, config.BigEndian)))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(read32(sym, off, config.BigEndian)))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool {
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index ba38ae0505..b9bb109b9e 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -8934,6 +8934,24 @@ func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool {
v.AuxInt = 0
return true
}
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read8(sym, off))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(read8(sym, off))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool {
@@ -12638,6 +12656,24 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool {
v.AuxInt = 0
return true
}
+ // match: (MOVDload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read64(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(read64(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool {
@@ -13563,6 +13599,24 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool {
v.AuxInt = 0
return true
}
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read16(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(read16(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool {
@@ -16183,6 +16237,24 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool {
v.AuxInt = 0
return true
}
+ // match: (MOVWUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read32(sym, off, config.BigEndian))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpSB {
+ break
+ }
+ if !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(read32(sym, off, config.BigEndian))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool {
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 9aff3106db..dad036d3d2 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -53,6 +53,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v)
case OpAtomicCompareAndSwap64:
return rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v)
+ case OpAtomicCompareAndSwapRel32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwapRel32_0(v)
case OpAtomicExchange32:
return rewriteValuePPC64_OpAtomicExchange32_0(v)
case OpAtomicExchange64:
@@ -61,6 +63,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValuePPC64_OpAtomicLoad64_0(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValuePPC64_OpAtomicLoadAcq32_0(v)
case OpAtomicLoadPtr:
return rewriteValuePPC64_OpAtomicLoadPtr_0(v)
case OpAtomicOr8:
@@ -69,6 +73,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpAtomicStore32_0(v)
case OpAtomicStore64:
return rewriteValuePPC64_OpAtomicStore64_0(v)
+ case OpAtomicStoreRel32:
+ return rewriteValuePPC64_OpAtomicStoreRel32_0(v)
case OpAvg64u:
return rewriteValuePPC64_OpAvg64u_0(v)
case OpBitLen32:
@@ -105,6 +111,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpConstNil_0(v)
case OpCopysign:
return rewriteValuePPC64_OpCopysign_0(v)
+ case OpCtz16:
+ return rewriteValuePPC64_OpCtz16_0(v)
case OpCtz32:
return rewriteValuePPC64_OpCtz32_0(v)
case OpCtz32NonZero:
@@ -113,6 +121,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpCtz64_0(v)
case OpCtz64NonZero:
return rewriteValuePPC64_OpCtz64NonZero_0(v)
+ case OpCtz8:
+ return rewriteValuePPC64_OpCtz8_0(v)
case OpCvt32Fto32:
return rewriteValuePPC64_OpCvt32Fto32_0(v)
case OpCvt32Fto64:
@@ -927,7 +937,7 @@ func rewriteValuePPC64_OpAtomicAnd8_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
- // result: (LoweredAtomicCas32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [1] ptr old new_ mem)
for {
_ = v.Args[3]
ptr := v.Args[0]
@@ -935,6 +945,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool {
new_ := v.Args[2]
mem := v.Args[3]
v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = 1
v.AddArg(ptr)
v.AddArg(old)
v.AddArg(new_)
@@ -945,7 +956,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v *Value) bool {
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// cond:
- // result: (LoweredAtomicCas64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 [1] ptr old new_ mem)
for {
_ = v.Args[3]
ptr := v.Args[0]
@@ -953,6 +964,26 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v *Value) bool {
new_ := v.Args[2]
mem := v.Args[3]
v.reset(OpPPC64LoweredAtomicCas64)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v.AddArg(old)
+ v.AddArg(new_)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwapRel32_0(v *Value) bool {
+ // match: (AtomicCompareAndSwapRel32 ptr old new_ mem)
+ // cond:
+ // result: (LoweredAtomicCas32 [0] ptr old new_ mem)
+ for {
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ old := v.Args[1]
+ new_ := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = 0
v.AddArg(ptr)
v.AddArg(old)
v.AddArg(new_)
@@ -995,12 +1026,13 @@ func rewriteValuePPC64_OpAtomicExchange64_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicLoad32_0(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
- // result: (LoweredAtomicLoad32 ptr mem)
+ // result: (LoweredAtomicLoad32 [1] ptr mem)
for {
_ = v.Args[1]
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = 1
v.AddArg(ptr)
v.AddArg(mem)
return true
@@ -1009,12 +1041,28 @@ func rewriteValuePPC64_OpAtomicLoad32_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicLoad64_0(v *Value) bool {
// match: (AtomicLoad64 ptr mem)
// cond:
- // result: (LoweredAtomicLoad64 ptr mem)
+ // result: (LoweredAtomicLoad64 [1] ptr mem)
for {
_ = v.Args[1]
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq32_0(v *Value) bool {
+ // match: (AtomicLoadAcq32 ptr mem)
+ // cond:
+ // result: (LoweredAtomicLoad32 [0] ptr mem)
+ for {
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = 0
v.AddArg(ptr)
v.AddArg(mem)
return true
@@ -1023,12 +1071,13 @@ func rewriteValuePPC64_OpAtomicLoad64_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicLoadPtr_0(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
- // result: (LoweredAtomicLoadPtr ptr mem)
+ // result: (LoweredAtomicLoadPtr [1] ptr mem)
for {
_ = v.Args[1]
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpPPC64LoweredAtomicLoadPtr)
+ v.AuxInt = 1
v.AddArg(ptr)
v.AddArg(mem)
return true
@@ -1053,13 +1102,14 @@ func rewriteValuePPC64_OpAtomicOr8_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicStore32_0(v *Value) bool {
// match: (AtomicStore32 ptr val mem)
// cond:
- // result: (LoweredAtomicStore32 ptr val mem)
+ // result: (LoweredAtomicStore32 [1] ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = 1
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
@@ -1069,13 +1119,31 @@ func rewriteValuePPC64_OpAtomicStore32_0(v *Value) bool {
func rewriteValuePPC64_OpAtomicStore64_0(v *Value) bool {
// match: (AtomicStore64 ptr val mem)
// cond:
- // result: (LoweredAtomicStore64 ptr val mem)
+ // result: (LoweredAtomicStore64 [1] ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel32_0(v *Value) bool {
+ // match: (AtomicStoreRel32 ptr val mem)
+ // cond:
+ // result: (LoweredAtomicStore32 [0] ptr val mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = 0
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
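
A note on the new `[0]`/`[1]` AuxInt on the PPC64 lowered atomics: the flag records whether the operation needs full sequential consistency (`1`, the default for `AtomicLoad32` and friends) or only acquire/release ordering (`0`, the new `...Acq32`/`...Rel32` variants), so the code generator can skip the heavyweight barriers in the weaker case. A hedged sketch of the convention; the names below are illustrative only, and the rewrite rules above are authoritative for which op gets which flag:

```go
// Illustrative constant names, not the compiler's actual identifiers.
const (
	auxFullyOrdered = 1 // AtomicLoad32/64, AtomicLoadPtr, AtomicStore32/64, CAS32/64
	auxAcqRelOnly   = 0 // AtomicLoadAcq32, AtomicStoreRel32, AtomicCompareAndSwapRel32
)
```
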
@@ -1323,6 +1391,29 @@ func rewriteValuePPC64_OpCopysign_0(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpCtz16_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (Ctz16 x)
+ // cond:
+ // result: (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x)))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16)
+ v2.AuxInt = -1
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValuePPC64_OpCtz32_0(v *Value) bool {
b := v.Block
_ = b
@@ -1389,6 +1480,29 @@ func rewriteValuePPC64_OpCtz64NonZero_0(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpCtz8_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (Ctz8 x)
+ // cond:
+ // result: (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x)))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8)
+ v2.AuxInt = -1
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
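
The new `Ctz16` and `Ctz8` rules use the classic trailing-zero identity: `(x-1) &^ x` turns x's trailing zeros into a run of ones and clears every other bit, so a population count of that mask, zero-extended to the operand width by `MOVHZreg`/`MOVBZreg`, is exactly the trailing-zero count, including the `x == 0` case. A quick check of the identity, as a sketch:

```go
package main

import (
	"fmt"
	"math/bits"
)

// ctz16 mirrors the POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x)) lowering:
// for x = 0b1000, (x-1) &^ x = 0b0111 and popcount gives 3; for x = 0 the
// 16-bit mask is all ones, giving the defined answer 16.
func ctz16(x uint16) int {
	return bits.OnesCount16((x - 1) &^ x)
}

func main() {
	fmt.Println(ctz16(0b1000), bits.TrailingZeros16(0b1000)) // 3 3
	fmt.Println(ctz16(0), bits.TrailingZeros16(0))           // 16 16
}
```
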
func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool {
b := v.Block
_ = b
@@ -26653,11 +26767,11 @@ func rewriteValuePPC64_OpPopCount8_0(v *Value) bool {
_ = typ
// match: (PopCount8 x)
// cond:
- // result: (POPCNTB (MOVBreg x))
+ // result: (POPCNTB (MOVBZreg x))
for {
x := v.Args[0]
v.reset(OpPPC64POPCNTB)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
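
The `PopCount8` change from `MOVBreg` to `MOVBZreg` is a correctness fix: `POPCNTB` counts bits in the full 64-bit register, so the byte must be zero-extended, not sign-extended; sign extension sets every bit above bit 7 for negative byte patterns. A sketch of the failure mode:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := int8(-128) // bit pattern 0x80: exactly one bit set
	fmt.Println(bits.OnesCount64(uint64(uint8(x)))) // 1  (zero-extend, MOVBZreg)
	fmt.Println(bits.OnesCount64(uint64(int64(x)))) // 57 (sign-extend, old MOVBreg)
}
```
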
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 2f239faa49..f0a1346acf 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -17353,6 +17353,51 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool {
v.AddArg(v1)
return true
}
+ // match: (Move {t1} [s1] dst tmp1 midmem:(Move {t2} [s2] tmp2 src _))
+ // cond: s1 == s2 && t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2)
+ // result: (Move {t1} [s1] dst src midmem)
+ for {
+ s1 := v.AuxInt
+ t1 := v.Aux
+ _ = v.Args[2]
+ dst := v.Args[0]
+ tmp1 := v.Args[1]
+ midmem := v.Args[2]
+ if midmem.Op != OpMove {
+ break
+ }
+ s2 := midmem.AuxInt
+ t2 := midmem.Aux
+ _ = midmem.Args[2]
+ tmp2 := midmem.Args[0]
+ src := midmem.Args[1]
+ if !(s1 == s2 && t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = s1
+ v.Aux = t1
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(midmem)
+ return true
+ }
+ // match: (Move dst src mem)
+ // cond: isSamePtr(dst, src)
+ // result: mem
+ for {
+ _ = v.Args[2]
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(isSamePtr(dst, src)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
return false
}
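
Two new generic `Move` simplifications land above: a Move whose source was just filled by an identically-sized, identically-typed Move from `src` can read straight from `src` (the intermediate copy remains as the memory argument and dies if otherwise unused), and a Move between the same pointer reduces to its memory state. A hedged sketch of the source pattern the first rule targets:

```go
type payload struct{ a, b, c, d int64 }

// Copying a value through a temporary emits back-to-back Moves; the
// rewrite feeds src straight into the final Move, letting the temp die.
func copyVia(dst, src *payload) {
	tmp := *src // Move tmp <- src
	*dst = tmp  // Move dst <- tmp, rewritten to Move dst <- src
}
```
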
func rewriteValuegeneric_OpMul16_0(v *Value) bool {
@@ -28748,9 +28793,9 @@ func rewriteValuegeneric_OpStringLen_0(v *Value) bool {
return false
}
func rewriteValuegeneric_OpStringPtr_0(v *Value) bool {
- // match: (StringPtr (StringMake (Const64 [c]) _))
+ // match: (StringPtr (StringMake (Addr {s} base) _))
// cond:
- // result: (Const64 [c])
+ // result: (Addr {s} base)
for {
v_0 := v.Args[0]
if v_0.Op != OpStringMake {
@@ -28758,14 +28803,16 @@ func rewriteValuegeneric_OpStringPtr_0(v *Value) bool {
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst64 {
+ if v_0_0.Op != OpAddr {
break
}
t := v_0_0.Type
- c := v_0_0.AuxInt
- v.reset(OpConst64)
+ s := v_0_0.Aux
+ base := v_0_0.Args[0]
+ v.reset(OpAddr)
v.Type = t
- v.AuxInt = c
+ v.Aux = s
+ v.AddArg(base)
return true
}
return false
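
The old `StringPtr` rule matched a `Const64` data pointer, which in practice never occurs (string data pointers are addresses, not integer constants); matching `Addr` instead lets the pointer of a statically constructed string flow through directly. Roughly the shape involved, as an illustration only (the `go.string.` symbol naming is an assumption about this era of the compiler):

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	s := "static" // SSA: StringMake (Addr {go.string."static"} (SB)) (Const 6)
	hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
	fmt.Printf("%#x\n", hdr.Data) // the data pointer the rewritten rule forwards
}
```
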
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index f1783a9532..9e19bb85b0 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -8,6 +8,7 @@ import "container/heap"
const (
ScorePhi = iota // towards top of block
+ ScoreArg
ScoreNilCheck
ScoreReadTuple
ScoreVarDef
@@ -113,6 +114,9 @@ func schedule(f *Func) {
case v.Op == OpVarDef:
// We want all the vardefs next.
score[v.ID] = ScoreVarDef
+ case v.Op == OpArg:
+ // We want all the args as early as possible, for better debugging.
+ score[v.ID] = ScoreArg
case v.Type.IsMemory():
// Schedule stores as early as possible. This tends to
// reduce register pressure. It also helps make sure
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
index f7af85446b..546da8348d 100644
--- a/src/cmd/compile/internal/ssa/sparsetree.go
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -98,9 +98,9 @@ func (t SparseTree) treestructure1(b *Block, i int) string {
s := "\n" + strings.Repeat("\t", i) + b.String() + "->["
for i, e := range b.Succs {
if i > 0 {
- s = s + ","
+ s += ","
}
- s = s + e.b.String()
+ s += e.b.String()
}
s += "]"
if c0 := t[b.ID].child; c0 != nil {
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 8d0ab93c64..7612585136 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -212,7 +212,7 @@ func (s *stackAllocState) stackalloc() {
h := f.getHome(id)
if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
// A variable can interfere with itself.
- // It is rare, but but it can happen.
+ // It is rare, but it can happen.
s.nSelfInterfere++
goto noname
}
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
index 6586f243e2..8664ea77c4 100644
--- a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
@@ -120,17 +120,17 @@ t = 0
a = 3
n = 0
t = 0
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
91: n += a
90: t += i * a
-92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
86: for i, a := range hist {
87: if a == 0 { //gdb-opt=(a,n,t)
a = 3
n = 3
t = 3
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
91: n += a
90: t += i * a
-92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
86: for i, a := range hist {
87: if a == 0 { //gdb-opt=(a,n,t)
a = 0
@@ -141,17 +141,17 @@ t = 9
a = 2
n = 6
t = 9
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
91: n += a
90: t += i * a
-92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
86: for i, a := range hist {
87: if a == 0 { //gdb-opt=(a,n,t)
a = 1
n = 8
t = 17
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
91: n += a
90: t += i * a
-92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
86: for i, a := range hist {
87: if a == 0 { //gdb-opt=(a,n,t)
a = 0
diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go
index a03e2734d2..56e97c71d8 100644
--- a/src/cmd/compile/internal/syntax/branches.go
+++ b/src/cmd/compile/internal/syntax/branches.go
@@ -77,7 +77,7 @@ func (ls *labelScope) declare(b *block, s *LabeledStmt) *label {
labels = make(map[string]*label)
ls.labels = labels
} else if alt := labels[name]; alt != nil {
- ls.err(s.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String())
+ ls.err(s.Label.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String())
return alt
}
l := &label{b, s, false}
diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/etype_string.go
index 503a30d0b4..f234a31fd0 100644
--- a/src/cmd/compile/internal/types/etype_string.go
+++ b/src/cmd/compile/internal/types/etype_string.go
@@ -4,9 +4,9 @@ package types
import "strconv"
-const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTR32PTR64FUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSDDDFIELDSSATUPLENTYPE"
+const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSDDDFIELDSSATUPLENTYPE"
-var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 101, 106, 110, 115, 120, 126, 130, 133, 138, 142, 145, 151, 160, 165, 168, 173, 181, 189, 197, 200, 205, 210}
+var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 190, 193, 198, 203}
func (i EType) String() string {
if i >= EType(len(_EType_index)-1) {
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
new file mode 100644
index 0000000000..2152485257
--- /dev/null
+++ b/src/cmd/compile/internal/types/identity.go
@@ -0,0 +1,119 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// Identical reports whether t1 and t2 are identical types, following
+// the spec rules. Receiver parameter types are ignored.
+func Identical(t1, t2 *Type) bool {
+ return identical(t1, t2, true, nil)
+}
+
+// IdenticalIgnoreTags is like Identical, but it ignores struct tags
+// for struct identity.
+func IdenticalIgnoreTags(t1, t2 *Type) bool {
+ return identical(t1, t2, false, nil)
+}
+
+type typePair struct {
+ t1 *Type
+ t2 *Type
+}
+
+func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
+ if t1 == t2 {
+ return true
+ }
+ if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
+ return false
+ }
+ if t1.Sym != nil || t2.Sym != nil {
+ // Special case: we keep byte/uint8 and rune/int32
+ // separate for error messages. Treat them as equal.
+ switch t1.Etype {
+ case TUINT8:
+ return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype)
+ case TINT32:
+ return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype)
+ default:
+ return false
+ }
+ }
+
+ // Any cyclic type must go through a named type, and if one is
+ // named, it is only identical to the other if they are the
+ // same pointer (t1 == t2), so there's no chance of chasing
+// cycles ad infinitum, and hence no need for a depth counter.
+ if assumedEqual == nil {
+ assumedEqual = make(map[typePair]struct{})
+ } else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
+ return true
+ }
+ assumedEqual[typePair{t1, t2}] = struct{}{}
+
+ switch t1.Etype {
+ case TINTER:
+ if t1.NumFields() != t2.NumFields() {
+ return false
+ }
+ for i, f1 := range t1.FieldSlice() {
+ f2 := t2.Field(i)
+ if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ return false
+ }
+ }
+ return true
+
+ case TSTRUCT:
+ if t1.NumFields() != t2.NumFields() {
+ return false
+ }
+ for i, f1 := range t1.FieldSlice() {
+ f2 := t2.Field(i)
+ if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ return false
+ }
+ if cmpTags && f1.Note != f2.Note {
+ return false
+ }
+ }
+ return true
+
+ case TFUNC:
+ // Check parameters and result parameters for type equality.
+ // We intentionally ignore receiver parameters for type
+ // equality, because they're never relevant.
+ for _, f := range ParamsResults {
+ // Loop over fields in structs, ignoring argument names.
+ fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
+ if len(fs1) != len(fs2) {
+ return false
+ }
+ for i, f1 := range fs1 {
+ f2 := fs2[i]
+ if f1.Isddd() != f2.Isddd() || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ return false
+ }
+ }
+ }
+ return true
+
+ case TARRAY:
+ if t1.NumElem() != t2.NumElem() {
+ return false
+ }
+
+ case TCHAN:
+ if t1.ChanDir() != t2.ChanDir() {
+ return false
+ }
+
+ case TMAP:
+ if !identical(t1.Key(), t2.Key(), cmpTags, assumedEqual) {
+ return false
+ }
+ }
+
+ return identical(t1.Elem(), t2.Elem(), cmpTags, assumedEqual)
+}
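
A hedged usage sketch for the new helpers: `Identical` and `IdenticalIgnoreTags` are the exported names defined above, but constructing `*types.Type` values requires the frontend to have initialized `types.Types` and friends, so this only makes sense inside cmd/compile:

```go
// Inside cmd/compile, after the frontend has populated types.Types:
p1 := types.NewPtr(types.Types[types.TINT])
p2 := types.NewPtr(types.Types[types.TINT])
ok := types.Identical(p1, p2) // true: both *int, compared structurally
_ = ok
// IdenticalIgnoreTags differs only in skipping struct-tag comparison,
// i.e. the cmpTags flag threaded through identical above.
```
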
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index e27c1fdba3..e502b986ae 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -135,7 +135,7 @@ func InternString(b []byte) string {
return s
}
-// CleanroomDo invokes f in an environment with with no preexisting packages.
+// CleanroomDo invokes f in an environment with no preexisting packages.
// For testing of import/export only.
func CleanroomDo(f func()) {
saved := pkgMap
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 25f8f826e6..e6e6127405 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -43,9 +43,7 @@ const (
TBOOL
- TPTR32
- TPTR64
-
+ TPTR
TFUNC
TSLICE
TARRAY
@@ -137,7 +135,7 @@ type Type struct {
// TFUNCARGS: FuncArgs
// TCHANARGS: ChanArgs
// TCHAN: *Chan
- // TPTR32, TPTR64: Ptr
+ // TPTR: Ptr
// TARRAY: *Array
// TSLICE: Slice
Extra interface{}
@@ -461,7 +459,7 @@ func New(et EType) *Type {
t.Extra = new(Struct)
case TINTER:
t.Extra = new(Interface)
- case TPTR32, TPTR64:
+ case TPTR:
t.Extra = Ptr{}
case TCHANARGS:
t.Extra = ChanArgs{}
@@ -560,11 +558,7 @@ func NewPtr(elem *Type) *Type {
return t
}
- if Tptr == 0 {
- Fatalf("NewPtr: Tptr not initialized")
- }
-
- t := New(Tptr)
+ t := New(TPTR)
t.Extra = Ptr{Elem: elem}
t.Width = int64(Widthptr)
t.Align = uint8(Widthptr)
@@ -619,7 +613,7 @@ func SubstAny(t *Type, types *[]*Type) *Type {
t = (*types)[0]
*types = (*types)[1:]
- case TPTR32, TPTR64:
+ case TPTR:
elem := SubstAny(t.Elem(), types)
if elem != t.Elem() {
t = t.copy()
@@ -790,7 +784,7 @@ func (t *Type) Key() *Type {
// Usable with pointers, channels, arrays, slices, and maps.
func (t *Type) Elem() *Type {
switch t.Etype {
- case TPTR32, TPTR64:
+ case TPTR:
return t.Extra.(Ptr).Elem
case TARRAY:
return t.Extra.(*Array).Elem
@@ -1101,7 +1095,7 @@ func (t *Type) cmp(x *Type) Cmp {
}
return t.Elem().cmp(x.Elem())
- case TPTR32, TPTR64, TSLICE:
+ case TPTR, TSLICE:
// No special cases for these, they are handled
// by the general code after the switch.
@@ -1199,7 +1193,7 @@ func (t *Type) cmp(x *Type) Cmp {
panic(e)
}
- // Common element type comparison for TARRAY, TCHAN, TPTR32, TPTR64, and TSLICE.
+ // Common element type comparison for TARRAY, TCHAN, TPTR, and TSLICE.
return t.Elem().cmp(x.Elem())
}
@@ -1261,7 +1255,7 @@ func (t *Type) IsComplex() bool {
// IsPtr reports whether t is a regular Go pointer type.
// This does not include unsafe.Pointer.
func (t *Type) IsPtr() bool {
- return t.Etype == TPTR32 || t.Etype == TPTR64
+ return t.Etype == TPTR
}
// IsUnsafePtr reports whether t is an unsafe pointer.
@@ -1275,7 +1269,7 @@ func (t *Type) IsUnsafePtr() bool {
// that consist of a single pointer shaped type.
// TODO(mdempsky): Should it? See golang.org/issue/15028.
func (t *Type) IsPtrShaped() bool {
- return t.Etype == TPTR32 || t.Etype == TPTR64 || t.Etype == TUNSAFEPTR ||
+ return t.Etype == TPTR || t.Etype == TUNSAFEPTR ||
t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
}
@@ -1449,7 +1443,7 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool {
}
return false
- case TPTR32, TPTR64, TSLICE:
+ case TPTR, TSLICE:
return !(ignoreNotInHeap && t.Elem().NotInHeap())
case TTUPLE:
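
The TPTR32/TPTR64 pair is collapsed into a single TPTR kind throughout; since NewPtr stamps each pointer type with Widthptr, nothing else needs to know the target's word size, and the frontend-initialized Tptr variable (removed just below) becomes unnecessary. A sketch of the simplified construction, assuming a package-level Widthptr set once per target (names are illustrative):

    package main

    import "fmt"

    var Widthptr = 8 // 4 on 32-bit targets

    type EType int

    const (
        TINT EType = iota
        TPTR
    )

    type Type struct {
        Etype EType
        Elem  *Type
        Width int64
    }

    func NewPtr(elem *Type) *Type {
        return &Type{Etype: TPTR, Elem: elem, Width: int64(Widthptr)}
    }

    func main() {
        p := NewPtr(&Type{Etype: TINT})
        fmt.Println(p.Etype == TPTR, p.Width) // true 8
    }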
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
index 0eac402f8e..caaeb889fb 100644
--- a/src/cmd/compile/internal/types/utils.go
+++ b/src/cmd/compile/internal/types/utils.go
@@ -11,9 +11,6 @@ import (
const BADWIDTH = -1000000000
-// Initialized by frontend. Exists only here.
-var Tptr EType // either TPTR32 or TPTR64
-
// The following variables must be initialized early by the frontend.
// They are here to break import cycles.
// TODO(gri) eliminate these dependencies.
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index e0bb4418ec..24ba9649be 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -198,24 +198,31 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW ||
v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW {
- var c *obj.Prog
+ if ssa.NeedsFixUp(v) {
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ c = s.Prog(x86.ACMPL)
+ j = s.Prog(x86.AJEQ)
+
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ c = s.Prog(x86.ACMPW)
+ j = s.Prog(x86.AJEQ)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+ }
+ // sign extend the dividend
switch v.Op {
case ssa.Op386DIVL, ssa.Op386MODL:
- c = s.Prog(x86.ACMPL)
- j = s.Prog(x86.AJEQ)
- s.Prog(x86.ACDQ) //TODO: fix
-
+ s.Prog(x86.ACDQ)
case ssa.Op386DIVW, ssa.Op386MODW:
- c = s.Prog(x86.ACMPW)
- j = s.Prog(x86.AJEQ)
s.Prog(x86.ACWD)
}
- c.From.Type = obj.TYPE_REG
- c.From.Reg = x
- c.To.Type = obj.TYPE_CONST
- c.To.Offset = -1
-
- j.To.Type = obj.TYPE_BRANCH
}
// for unsigned ints, we sign extend by setting DX = 0
@@ -278,6 +285,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
m.To.Reg = x86.REG_DX
}
+ case ssa.Op386MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results: low 32 bits in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
case ssa.Op386MULLQU:
// AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
p := s.Prog(v.Op.Asm())
@@ -484,47 +498,43 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- case ssa.Op386MOVSDloadidx8:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
- p.From.Scale = 8
- p.From.Index = v.Args[1].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = v.Reg()
- case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
- p.From.Scale = 4
- p.From.Index = v.Args[1].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = v.Reg()
- case ssa.Op386MOVWloadidx2:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
- p.From.Scale = 2
- p.From.Index = v.Args[1].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = v.Reg()
- case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
+ ssa.Op386MOVSDloadidx8, ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4, ssa.Op386MOVWloadidx2:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
- if i == x86.REG_SP {
- r, i = i, r
- }
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Scale = 1
+ case ssa.Op386MOVSDloadidx8:
+ p.From.Scale = 8
+ case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
+ p.From.Scale = 4
+ case ssa.Op386MOVWloadidx2:
+ p.From.Scale = 2
+ }
p.From.Reg = r
- p.From.Scale = 1
p.From.Index = i
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
+ ssa.Op386ANDLloadidx4, ssa.Op386ORLloadidx4, ssa.Op386XORLloadidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Index = v.Args[2].Reg()
+ p.From.Scale = 4
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386MULLload,
ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload,
ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload,
@@ -573,45 +583,30 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
- case ssa.Op386MOVSDstoreidx8:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_REG
- p.From.Reg = v.Args[2].Reg()
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- p.To.Scale = 8
- p.To.Index = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
- case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_REG
- p.From.Reg = v.Args[2].Reg()
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- p.To.Scale = 4
- p.To.Index = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
- case ssa.Op386MOVWstoreidx2:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_REG
- p.From.Reg = v.Args[2].Reg()
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- p.To.Scale = 2
- p.To.Index = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
- case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
+ ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
- if i == x86.REG_SP {
- r, i = i, r
- }
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.To.Scale = 1
+ case ssa.Op386MOVSDstoreidx8:
+ p.To.Scale = 8
+ case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ p.To.Scale = 4
+ case ssa.Op386MOVWstoreidx2:
+ p.To.Scale = 2
+ }
p.To.Reg = r
- p.To.Scale = 1
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
@@ -622,7 +617,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
- case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1:
+ case ssa.Op386ADDLconstmodifyidx4:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Scale = 4
+ p.To.Index = v.Args[1].Reg()
+ gc.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1,
+ ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
@@ -637,7 +652,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.Op386MOVWstoreconstidx2:
p.To.Scale = 2
- case ssa.Op386MOVLstoreconstidx4:
+ case ssa.Op386MOVLstoreconstidx4,
+ ssa.Op386ADDLconstmodifyidx4, ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
p.To.Scale = 4
}
p.To.Type = obj.TYPE_MEM
@@ -768,7 +784,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.Op386SETGF, ssa.Op386SETGEF,
ssa.Op386SETB, ssa.Op386SETBE,
ssa.Op386SETORD, ssa.Op386SETNAN,
- ssa.Op386SETA, ssa.Op386SETAE:
+ ssa.Op386SETA, ssa.Op386SETAE,
+ ssa.Op386SETO:
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -840,6 +857,8 @@ var blockJump = [...]struct {
ssa.Block386GE: {x86.AJGE, x86.AJLT},
ssa.Block386LE: {x86.AJLE, x86.AJGT},
ssa.Block386GT: {x86.AJGT, x86.AJLE},
+ ssa.Block386OS: {x86.AJOS, x86.AJOC},
+ ssa.Block386OC: {x86.AJOC, x86.AJOS},
ssa.Block386ULT: {x86.AJCS, x86.AJCC},
ssa.Block386UGE: {x86.AJCC, x86.AJCS},
ssa.Block386UGT: {x86.AJHI, x86.AJLS},
@@ -901,6 +920,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
case ssa.Block386EQ, ssa.Block386NE,
ssa.Block386LT, ssa.Block386GE,
ssa.Block386LE, ssa.Block386GT,
+ ssa.Block386OS, ssa.Block386OC,
ssa.Block386ULT, ssa.Block386UGT,
ssa.Block386ULE, ssa.Block386UGE:
jmp := blockJump[b.Kind]
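
The division restructuring emits the CMP $-1/JEQ fixup only when ssa.NeedsFixUp(v) says it is required, while the sign extension of the dividend (CDQ/CWD) is always emitted. The fixup exists because the Go spec defines the most negative value divided by -1 to wrap to itself, whereas the x86 IDIV instruction faults on that case, so the generated code must branch around IDIV when the divisor is -1. The rule it protects, in plain Go:

    // Demonstrates the overflow rule the branch implements; with a variable
    // divisor the compiler must handle this case at run time.
    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := int32(math.MinInt32)
        d := int32(-1)
        fmt.Println(x/d, x%d) // -2147483648 0
    }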
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 94b7587026..2db115e20e 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -65,6 +65,7 @@ var bootstrapDirs = []string{
"cmd/internal/obj/wasm",
"cmd/internal/src",
"cmd/internal/sys",
+ "cmd/internal/xcoff",
"cmd/link",
"cmd/link/internal/amd64",
"cmd/link/internal/arm",
@@ -73,6 +74,7 @@ var bootstrapDirs = []string{
"cmd/link/internal/loadelf",
"cmd/link/internal/loadmacho",
"cmd/link/internal/loadpe",
+ "cmd/link/internal/loadxcoff",
"cmd/link/internal/mips",
"cmd/link/internal/mips64",
"cmd/link/internal/objfile",
diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go
index 6010f04b56..f8c52b1988 100644
--- a/src/cmd/doc/doc_test.go
+++ b/src/cmd/doc/doc_test.go
@@ -127,24 +127,87 @@ var tests = []test{
`type T1 = T2`, // Type alias
},
[]string{
- `const internalConstant = 2`, // No internal constants.
- `var internalVariable = 2`, // No internal variables.
- `func internalFunc(a int) bool`, // No internal functions.
- `Comment about exported constant`, // No comment for single constant.
- `Comment about exported variable`, // No comment for single variable.
- `Comment about block of constants.`, // No comment for constant block.
- `Comment about block of variables.`, // No comment for variable block.
- `Comment before ConstOne`, // No comment for first entry in constant block.
- `Comment before VarOne`, // No comment for first entry in variable block.
- `ConstTwo = 2`, // No second entry in constant block.
- `VarTwo = 2`, // No second entry in variable block.
- `VarFive = 5`, // From block starting with unexported variable.
- `type unexportedType`, // No unexported type.
- `unexportedTypedConstant`, // No unexported typed constant.
- `\bField`, // No fields.
- `Method`, // No methods.
- `someArgument[5-8]`, // No truncated arguments.
- `type T1 T2`, // Type alias does not display as type declaration.
+ `const internalConstant = 2`, // No internal constants.
+ `var internalVariable = 2`, // No internal variables.
+ `func internalFunc(a int) bool`, // No internal functions.
+ `Comment about exported constant`, // No comment for single constant.
+ `Comment about exported variable`, // No comment for single variable.
+ `Comment about block of constants`, // No comment for constant block.
+ `Comment about block of variables`, // No comment for variable block.
+ `Comment before ConstOne`, // No comment for first entry in constant block.
+ `Comment before VarOne`, // No comment for first entry in variable block.
+ `ConstTwo = 2`, // No second entry in constant block.
+ `VarTwo = 2`, // No second entry in variable block.
+ `VarFive = 5`, // From block starting with unexported variable.
+ `type unexportedType`, // No unexported type.
+ `unexportedTypedConstant`, // No unexported typed constant.
+ `\bField`, // No fields.
+ `Method`, // No methods.
+ `someArgument[5-8]`, // No truncated arguments.
+ `type T1 T2`, // Type alias does not display as type declaration.
+ },
+ },
+ // Package dump -all
+ {
+ "full package",
+ []string{"-all", p},
+ []string{
+ `package pkg .*import`,
+ `Package comment`,
+ `CONSTANTS`,
+ `Comment before ConstOne`,
+ `ConstOne = 1`,
+ `ConstTwo = 2 // Comment on line with ConstTwo`,
+ `ConstFive`,
+ `ConstSix`,
+ `Const block where first entry is unexported`,
+ `ConstLeft2, constRight2 uint64`,
+ `constLeft3, ConstRight3`,
+ `ConstLeft4, ConstRight4`,
+ `Duplicate = iota`,
+ `const CaseMatch = 1`,
+ `const Casematch = 2`,
+ `const ExportedConstant = 1`,
+ `const MultiLineConst = `,
+ `MultiLineString1`,
+ `VARIABLES`,
+ `Comment before VarOne`,
+ `VarOne = 1`,
+ `Comment about block of variables`,
+ `VarFive = 5`,
+ `var ExportedVariable = 1`,
+ `var LongLine = newLongLine\(`,
+ `var MultiLineVar = map\[struct {`,
+ `FUNCTIONS`,
+ `func ExportedFunc\(a int\) bool`,
+ `Comment about exported function`,
+ `func MultiLineFunc\(x interface`,
+ `func ReturnUnexported\(\) unexportedType`,
+ `TYPES`,
+ `type ExportedInterface interface`,
+ `type ExportedStructOneField struct`,
+ `type ExportedType struct`,
+ `Comment about exported type`,
+ `const ConstGroup4 ExportedType = ExportedType`,
+ `ExportedTypedConstant ExportedType = iota`,
+ `Constants tied to ExportedType`,
+ `func ExportedTypeConstructor\(\) \*ExportedType`,
+ `Comment about constructor for exported type`,
+ `func ReturnExported\(\) ExportedType`,
+ `func \(ExportedType\) ExportedMethod\(a int\) bool`,
+ `Comment about exported method`,
+ `type T1 = T2`,
+ `type T2 int`,
+ },
+ []string{
+ `constThree`,
+ `_, _ uint64 = 2 \* iota, 1 << iota`,
+ `constLeft1, constRight1`,
+ `duplicate`,
+ `varFour`,
+ `func internalFunc`,
+ `unexportedField`,
+ `func \(unexportedType\)`,
},
},
// Package dump -u
@@ -164,6 +227,58 @@ var tests = []test{
`MultiLine(String|Method|Field)`, // No data from multi line portions.
},
},
+ // Package dump -u -all
+ {
+ "full package",
+ []string{"-u", "-all", p},
+ []string{
+ `package pkg .*import`,
+ `Package comment`,
+ `CONSTANTS`,
+ `Comment before ConstOne`,
+ `ConstOne += 1`,
+ `ConstTwo += 2 // Comment on line with ConstTwo`,
+ `constThree = 3 // Comment on line with constThree`,
+ `ConstFive`,
+ `const internalConstant += 2`,
+ `Comment about internal constant`,
+ `VARIABLES`,
+ `Comment before VarOne`,
+ `VarOne += 1`,
+ `Comment about block of variables`,
+ `varFour += 4`,
+ `VarFive += 5`,
+ `varSix += 6`,
+ `var ExportedVariable = 1`,
+ `var LongLine = newLongLine\(`,
+ `var MultiLineVar = map\[struct {`,
+ `var internalVariable = 2`,
+ `Comment about internal variable`,
+ `FUNCTIONS`,
+ `func ExportedFunc\(a int\) bool`,
+ `Comment about exported function`,
+ `func MultiLineFunc\(x interface`,
+ `func internalFunc\(a int\) bool`,
+ `Comment about internal function`,
+ `func newLongLine\(ss .*string\)`,
+ `TYPES`,
+ `type ExportedType struct`,
+ `type T1 = T2`,
+ `type T2 int`,
+ `type unexportedType int`,
+ `Comment about unexported type`,
+ `ConstGroup1 unexportedType = iota`,
+ `ConstGroup2`,
+ `ConstGroup3`,
+ `ExportedTypedConstant_unexported unexportedType = iota`,
+ `Constants tied to unexportedType`,
+ `const unexportedTypedConstant unexportedType = 1`,
+ `func ReturnUnexported\(\) unexportedType`,
+ `func \(unexportedType\) ExportedMethod\(\) bool`,
+ `func \(unexportedType\) unexportedMethod\(\) bool`,
+ },
+ nil,
+ },
// Single constant.
{
@@ -207,6 +322,18 @@ var tests = []test{
},
nil,
},
+ // Block of constants -src.
+ {
+ "block of constants with -src",
+ []string{"-src", p, `ConstTwo`},
+ []string{
+ `Comment about block of constants`, // Top comment.
+ `ConstOne.*=.*1`, // Each constant seen.
+ `ConstTwo.*=.*2.*Comment on line with ConstTwo`,
+ `constThree`, // Even unexported constants.
+ },
+ nil,
+ },
// Block of constants with carryover type from unexported field.
{
"block of constants with carryover type",
@@ -295,6 +422,17 @@ var tests = []test{
},
nil,
},
+ // Function with -src.
+ {
+ "function with -src",
+ []string{"-src", p, `ExportedFunc`},
+ []string{
+ `Comment about exported function`, // Include comment.
+ `func ExportedFunc\(a int\) bool`,
+ `return true != false`, // Include body.
+ },
+ nil,
+ },
// Type.
{
@@ -304,21 +442,43 @@ var tests = []test{
`Comment about exported type`, // Include comment.
`type ExportedType struct`, // Type definition.
`Comment before exported field.*\n.*ExportedField +int` +
- `.*Comment on line with exported field.`,
- `ExportedEmbeddedType.*Comment on line with exported embedded field.`,
+ `.*Comment on line with exported field`,
+ `ExportedEmbeddedType.*Comment on line with exported embedded field`,
`Has unexported fields`,
`func \(ExportedType\) ExportedMethod\(a int\) bool`,
`const ExportedTypedConstant ExportedType = iota`, // Must include associated constant.
`func ExportedTypeConstructor\(\) \*ExportedType`, // Must include constructor.
- `io.Reader.*Comment on line with embedded Reader.`,
+ `io.Reader.*Comment on line with embedded Reader`,
},
[]string{
- `unexportedField`, // No unexported field.
- `int.*embedded`, // No unexported embedded field.
- `Comment about exported method.`, // No comment about exported method.
- `unexportedMethod`, // No unexported method.
- `unexportedTypedConstant`, // No unexported constant.
- `error`, // No embedded error.
+ `unexportedField`, // No unexported field.
+ `int.*embedded`, // No unexported embedded field.
+ `Comment about exported method`, // No comment about exported method.
+ `unexportedMethod`, // No unexported method.
+ `unexportedTypedConstant`, // No unexported constant.
+ `error`, // No embedded error.
+ },
+ },
+ // Type with -src. Will see unexported fields.
+ {
+ "type",
+ []string{"-src", p, `ExportedType`},
+ []string{
+ `Comment about exported type`, // Include comment.
+ `type ExportedType struct`, // Type definition.
+ `Comment before exported field`,
+ `ExportedField.*Comment on line with exported field`,
+ `ExportedEmbeddedType.*Comment on line with exported embedded field`,
+ `unexportedType.*Comment on line with unexported embedded field`,
+ `func \(ExportedType\) ExportedMethod\(a int\) bool`,
+ `const ExportedTypedConstant ExportedType = iota`, // Must include associated constant.
+ `func ExportedTypeConstructor\(\) \*ExportedType`, // Must include constructor.
+ `io.Reader.*Comment on line with embedded Reader`,
+ },
+ []string{
+ `Comment about exported method`, // No comment about exported method.
+ `unexportedMethod`, // No unexported method.
+ `unexportedTypedConstant`, // No unexported constant.
},
},
// Type T1 dump (alias).
@@ -341,14 +501,14 @@ var tests = []test{
`Comment about exported type`, // Include comment.
`type ExportedType struct`, // Type definition.
`Comment before exported field.*\n.*ExportedField +int`,
- `unexportedField.*int.*Comment on line with unexported field.`,
- `ExportedEmbeddedType.*Comment on line with exported embedded field.`,
- `\*ExportedEmbeddedType.*Comment on line with exported embedded \*field.`,
- `\*qualified.ExportedEmbeddedType.*Comment on line with exported embedded \*selector.field.`,
- `unexportedType.*Comment on line with unexported embedded field.`,
- `\*unexportedType.*Comment on line with unexported embedded \*field.`,
- `io.Reader.*Comment on line with embedded Reader.`,
- `error.*Comment on line with embedded error.`,
+ `unexportedField.*int.*Comment on line with unexported field`,
+ `ExportedEmbeddedType.*Comment on line with exported embedded field`,
+ `\*ExportedEmbeddedType.*Comment on line with exported embedded \*field`,
+ `\*qualified.ExportedEmbeddedType.*Comment on line with exported embedded \*selector.field`,
+ `unexportedType.*Comment on line with unexported embedded field`,
+ `\*unexportedType.*Comment on line with unexported embedded \*field`,
+ `io.Reader.*Comment on line with embedded Reader`,
+ `error.*Comment on line with embedded error`,
`func \(ExportedType\) unexportedMethod\(a int\) bool`,
`unexportedTypedConstant`,
},
@@ -380,8 +540,8 @@ var tests = []test{
`type ExportedInterface interface`, // Interface definition.
`Comment before exported method.*\n.*ExportedMethod\(\)` +
`.*Comment on line with exported method`,
- `io.Reader.*Comment on line with embedded Reader.`,
- `error.*Comment on line with embedded error.`,
+ `io.Reader.*Comment on line with embedded Reader`,
+ `error.*Comment on line with embedded error`,
`Has unexported methods`,
},
[]string{
@@ -400,9 +560,9 @@ var tests = []test{
`type ExportedInterface interface`, // Interface definition.
`Comment before exported method.*\n.*ExportedMethod\(\)` +
`.*Comment on line with exported method`,
- `unexportedMethod\(\).*Comment on line with unexported method.`,
- `io.Reader.*Comment on line with embedded Reader.`,
- `error.*Comment on line with embedded error.`,
+ `unexportedMethod\(\).*Comment on line with unexported method`,
+ `io.Reader.*Comment on line with embedded Reader`,
+ `error.*Comment on line with embedded error`,
},
[]string{
`Has unexported methods`,
@@ -418,7 +578,7 @@ var tests = []test{
`.*Comment on line with exported method`,
},
[]string{
- `Comment about exported interface.`,
+ `Comment about exported interface`,
},
},
@@ -428,7 +588,7 @@ var tests = []test{
[]string{p, `ExportedType.ExportedMethod`},
[]string{
`func \(ExportedType\) ExportedMethod\(a int\) bool`,
- `Comment about exported method.`,
+ `Comment about exported method`,
},
nil,
},
@@ -438,7 +598,18 @@ var tests = []test{
[]string{"-u", p, `ExportedType.unexportedMethod`},
[]string{
`func \(ExportedType\) unexportedMethod\(a int\) bool`,
- `Comment about unexported method.`,
+ `Comment about unexported method`,
+ },
+ nil,
+ },
+ // Method with -src.
+ {
+ "method with -src",
+ []string{"-src", p, `ExportedType.ExportedMethod`},
+ []string{
+ `func \(ExportedType\) ExportedMethod\(a int\) bool`,
+ `Comment about exported method`,
+ `return true != true`,
},
nil,
},
@@ -450,8 +621,8 @@ var tests = []test{
[]string{
`type ExportedType struct`,
`ExportedField int`,
- `Comment before exported field.`,
- `Comment on line with exported field.`,
+ `Comment before exported field`,
+ `Comment on line with exported field`,
`other fields elided`,
},
nil,
@@ -463,7 +634,7 @@ var tests = []test{
[]string{"-u", p, `ExportedType.unexportedField`},
[]string{
`unexportedField int`,
- `Comment on line with unexported field.`,
+ `Comment on line with unexported field`,
},
nil,
},
@@ -562,6 +733,9 @@ func TestDoc(t *testing.T) {
failed = true
}
}
+ if bytes.Count(output, []byte("TYPES\n")) > 1 {
+ t.Fatalf("%s: repeating headers", test.name)
+ }
if failed {
t.Logf("\n%s", output)
}
diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go
index 982c8e054a..614f19438c 100644
--- a/src/cmd/doc/main.go
+++ b/src/cmd/doc/main.go
@@ -28,6 +28,12 @@
// For commands, unless the -cmd flag is present "go doc command"
// shows only the package-level docs for the package.
//
+// The -src flag causes doc to print the full source code for the symbol, such
+// as the body of a struct, function or method.
+//
+// The -all flag causes doc to print all documentation for the package and
+// all its visible symbols. The argument must identify a package.
+//
// For complete documentation, run "go help doc".
package main
@@ -49,7 +55,9 @@ import (
var (
unexported bool // -u flag
matchCase bool // -c flag
+ showAll bool // -all flag
showCmd bool // -cmd flag
+ showSrc bool // -src flag
)
// usage is a replacement usage function for the flags package.
@@ -84,7 +92,9 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
matchCase = false
flagSet.BoolVar(&unexported, "u", false, "show unexported symbols as well as exported")
flagSet.BoolVar(&matchCase, "c", false, "symbol matching honors case (paths not affected)")
+ flagSet.BoolVar(&showAll, "all", false, "show all documentation for package")
flagSet.BoolVar(&showCmd, "cmd", false, "show symbols with package docs even if package is a command")
+ flagSet.BoolVar(&showSrc, "src", false, "show source code for symbol")
flagSet.Parse(args)
var paths []string
var symbol, method string
@@ -122,6 +132,15 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
unexported = true
}
+ // We have a package.
+ if showAll {
+ if symbol != "" {
+ return fmt.Errorf("-all valid only for package, not symbol: %s", symbol)
+ }
+ pkg.allDoc()
+ return
+ }
+
switch {
case symbol == "":
pkg.packageDoc() // The package exists, so we got some output.
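
For illustration, some invocations these new flags enable, following the style of the examples in the help text (the package and symbol names here are arbitrary):

    go doc -all text/template
    go doc -src encoding/json NewDecoder
    go doc -u -all .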
diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go
index 14e41b9106..7c4e00767d 100644
--- a/src/cmd/doc/pkg.go
+++ b/src/cmd/doc/pkg.go
@@ -12,6 +12,7 @@ import (
"go/doc"
"go/format"
"go/parser"
+ "go/printer"
"go/token"
"io"
"log"
@@ -29,15 +30,18 @@ const (
)
type Package struct {
- writer io.Writer // Destination for output.
- name string // Package name, json for encoding/json.
- userPath string // String the user used to find this package.
- pkg *ast.Package // Parsed package.
- file *ast.File // Merged from all files in the package
- doc *doc.Package
- build *build.Package
- fs *token.FileSet // Needed for printing.
- buf bytes.Buffer
+ writer io.Writer // Destination for output.
+ name string // Package name, json for encoding/json.
+ userPath string // String the user used to find this package.
+ pkg *ast.Package // Parsed package.
+ file *ast.File // Merged from all files in the package
+ doc *doc.Package
+ build *build.Package
+ typedValue map[*doc.Value]bool // Consts and vars related to types.
+ constructor map[*doc.Func]bool // Constructors.
+ packageClausePrinted bool // Prevent repeated package clauses.
+ fs *token.FileSet // Needed for printing.
+ buf bytes.Buffer
}
type PackageError string // type returned by pkg.Fatalf.
@@ -137,22 +141,43 @@ func parsePackage(writer io.Writer, pkg *build.Package, userPath string) *Packag
// from finding the symbol. Work around this for now, but we
// should fix it in go/doc.
// A similar story applies to factory functions.
- docPkg := doc.New(astPkg, pkg.ImportPath, doc.AllDecls)
+ mode := doc.AllDecls
+ if showSrc {
+ mode |= doc.PreserveAST // See comment for Package.emit.
+ }
+ docPkg := doc.New(astPkg, pkg.ImportPath, mode)
+ typedValue := make(map[*doc.Value]bool)
+ constructor := make(map[*doc.Func]bool)
for _, typ := range docPkg.Types {
docPkg.Consts = append(docPkg.Consts, typ.Consts...)
+ for _, value := range typ.Consts {
+ typedValue[value] = true
+ }
docPkg.Vars = append(docPkg.Vars, typ.Vars...)
+ for _, value := range typ.Vars {
+ typedValue[value] = true
+ }
docPkg.Funcs = append(docPkg.Funcs, typ.Funcs...)
+ for _, fun := range typ.Funcs {
+ // We don't count it as a constructor bound to the type
+ // if the type itself is not exported.
+ if isExported(typ.Name) {
+ constructor[fun] = true
+ }
+ }
}
return &Package{
- writer: writer,
- name: pkg.Name,
- userPath: userPath,
- pkg: astPkg,
- file: ast.MergePackageFiles(astPkg, 0),
- doc: docPkg,
- build: pkg,
- fs: fs,
+ writer: writer,
+ name: pkg.Name,
+ userPath: userPath,
+ pkg: astPkg,
+ file: ast.MergePackageFiles(astPkg, 0),
+ doc: docPkg,
+ typedValue: typedValue,
+ constructor: constructor,
+ build: pkg,
+ fs: fs,
}
}
@@ -177,14 +202,24 @@ func (pkg *Package) newlines(n int) {
}
}
-// emit prints the node.
+// emit prints the node. If showSrc is true, it ignores the provided comment,
+// assuming the comment is in the node itself. Otherwise, the go/doc package
+// clears the stuff we don't want to print anyway. It's a bit of a magic trick.
func (pkg *Package) emit(comment string, node ast.Node) {
if node != nil {
- err := format.Node(&pkg.buf, pkg.fs, node)
+ var arg interface{} = node
+ if showSrc {
+ // Need an extra little dance to get internal comments to appear.
+ arg = &printer.CommentedNode{
+ Node: node,
+ Comments: pkg.file.Comments,
+ }
+ }
+ err := format.Node(&pkg.buf, pkg.fs, arg)
if err != nil {
log.Fatal(err)
}
- if comment != "" {
+ if comment != "" && !showSrc {
pkg.newlines(1)
doc.ToText(&pkg.buf, comment, " ", indent, indentedWidth)
pkg.newlines(2) // Blank line after comment to separate from next item.
@@ -384,6 +419,69 @@ func joinStrings(ss []string) string {
return strings.Join(ss, ", ")
}
+// allDoc prints all the docs for the package.
+func (pkg *Package) allDoc() {
+ defer pkg.flush()
+ if pkg.showInternals() {
+ pkg.packageClause(false)
+ }
+
+ doc.ToText(&pkg.buf, pkg.doc.Doc, "", indent, indentedWidth)
+ pkg.newlines(1)
+
+ printed := make(map[*ast.GenDecl]bool)
+
+ hdr := ""
+ printHdr := func(s string) {
+ if hdr != s {
+ pkg.Printf("\n%s\n\n", s)
+ hdr = s
+ }
+ }
+
+ // Constants.
+ for _, value := range pkg.doc.Consts {
+ // Constants and variables come in groups, and valueDoc prints
+ // all the items in the group. We only need to find one exported symbol.
+ for _, name := range value.Names {
+ if isExported(name) && !pkg.typedValue[value] {
+ printHdr("CONSTANTS")
+ pkg.valueDoc(value, printed)
+ break
+ }
+ }
+ }
+
+ // Variables.
+ for _, value := range pkg.doc.Vars {
+ // Constants and variables come in groups, and valueDoc prints
+ // all the items in the group. We only need to find one exported symbol.
+ for _, name := range value.Names {
+ if isExported(name) && !pkg.typedValue[value] {
+ printHdr("VARIABLES")
+ pkg.valueDoc(value, printed)
+ break
+ }
+ }
+ }
+
+ // Functions.
+ for _, fun := range pkg.doc.Funcs {
+ if isExported(fun.Name) && !pkg.constructor[fun] {
+ printHdr("FUNCTIONS")
+ pkg.emit(fun.Doc, fun.Decl)
+ }
+ }
+
+ // Types.
+ for _, typ := range pkg.doc.Types {
+ if isExported(typ.Name) {
+ printHdr("TYPES")
+ pkg.typeDoc(typ)
+ }
+ }
+}
+
// packageDoc prints the docs for the package (package doc plus one-liners of the rest).
func (pkg *Package) packageDoc() {
defer pkg.flush()
@@ -420,6 +518,10 @@ func (pkg *Package) showInternals() bool {
// user's argument is identical to the actual package path or
// is empty, meaning it's the current directory.
func (pkg *Package) packageClause(checkUserPath bool) {
+ if pkg.packageClausePrinted {
+ return
+ }
+
if checkUserPath {
if pkg.userPath == "" || pkg.userPath == pkg.build.ImportPath {
return
@@ -457,6 +559,7 @@ func (pkg *Package) packageClause(checkUserPath bool) {
if !usingModules && importPath != pkg.build.ImportPath {
pkg.Printf("WARNING: package source is installed in %q\n", pkg.build.ImportPath)
}
+ pkg.packageClausePrinted = true
}
// valueSummary prints a one-line summary for each set of values and constants.
@@ -491,22 +594,10 @@ func (pkg *Package) valueSummary(values []*doc.Value, showGrouped bool) {
// funcSummary prints a one-line summary for each function. Constructors
// are printed by typeSummary, below, and so can be suppressed here.
func (pkg *Package) funcSummary(funcs []*doc.Func, showConstructors bool) {
- // First, identify the constructors. Don't bother figuring out if they're exported.
- var isConstructor map[*doc.Func]bool
- if !showConstructors {
- isConstructor = make(map[*doc.Func]bool)
- for _, typ := range pkg.doc.Types {
- if isExported(typ.Name) {
- for _, f := range typ.Funcs {
- isConstructor[f] = true
- }
- }
- }
- }
for _, fun := range funcs {
// Exported functions only. The go/doc package does not include methods here.
if isExported(fun.Name) {
- if !isConstructor[fun] {
+ if showConstructors || !pkg.constructor[fun] {
pkg.Printf("%s\n", pkg.oneLineNode(fun.Decl))
}
}
@@ -611,7 +702,6 @@ func (pkg *Package) symbolDoc(symbol string) bool {
}
// Symbol is a function.
decl := fun.Decl
- decl.Body = nil
pkg.emit(fun.Doc, decl)
found = true
}
@@ -624,71 +714,12 @@ func (pkg *Package) symbolDoc(symbol string) bool {
// So we remember which declarations we've printed to avoid duplication.
printed := make(map[*ast.GenDecl]bool)
for _, value := range values {
- // Print each spec only if there is at least one exported symbol in it.
- // (See issue 11008.)
- // TODO: Should we elide unexported symbols from a single spec?
- // It's an unlikely scenario, probably not worth the trouble.
- // TODO: Would be nice if go/doc did this for us.
- specs := make([]ast.Spec, 0, len(value.Decl.Specs))
- var typ ast.Expr
- for _, spec := range value.Decl.Specs {
- vspec := spec.(*ast.ValueSpec)
-
- // The type name may carry over from a previous specification in the
- // case of constants and iota.
- if vspec.Type != nil {
- typ = vspec.Type
- }
-
- for _, ident := range vspec.Names {
- if isExported(ident.Name) {
- if vspec.Type == nil && vspec.Values == nil && typ != nil {
- // This a standalone identifier, as in the case of iota usage.
- // Thus, assume the type comes from the previous type.
- vspec.Type = &ast.Ident{
- Name: pkg.oneLineNode(typ),
- NamePos: vspec.End() - 1,
- }
- }
-
- specs = append(specs, vspec)
- typ = nil // Only inject type on first exported identifier
- break
- }
- }
- }
- if len(specs) == 0 || printed[value.Decl] {
- continue
- }
- value.Decl.Specs = specs
- if !found {
- pkg.packageClause(true)
- }
- pkg.emit(value.Doc, value.Decl)
- printed[value.Decl] = true
+ pkg.valueDoc(value, printed)
found = true
}
// Types.
for _, typ := range pkg.findTypes(symbol) {
- if !found {
- pkg.packageClause(true)
- }
- decl := typ.Decl
- spec := pkg.findTypeSpec(decl, typ.Name)
- trimUnexportedElems(spec)
- // If there are multiple types defined, reduce to just this one.
- if len(decl.Specs) > 1 {
- decl.Specs = []ast.Spec{spec}
- }
- pkg.emit(typ.Doc, decl)
- // Show associated methods, constants, etc.
- if len(typ.Consts) > 0 || len(typ.Vars) > 0 || len(typ.Funcs) > 0 || len(typ.Methods) > 0 {
- pkg.Printf("\n")
- }
- pkg.valueSummary(typ.Consts, true)
- pkg.valueSummary(typ.Vars, true)
- pkg.funcSummary(typ.Funcs, true)
- pkg.funcSummary(typ.Methods, true)
+ pkg.typeDoc(typ)
found = true
}
if !found {
@@ -700,10 +731,98 @@ func (pkg *Package) symbolDoc(symbol string) bool {
return true
}
+// valueDoc prints the docs for a constant or variable.
+func (pkg *Package) valueDoc(value *doc.Value, printed map[*ast.GenDecl]bool) {
+ if printed[value.Decl] {
+ return
+ }
+ // Print each spec only if there is at least one exported symbol in it.
+ // (See issue 11008.)
+ // TODO: Should we elide unexported symbols from a single spec?
+ // It's an unlikely scenario, probably not worth the trouble.
+ // TODO: Would be nice if go/doc did this for us.
+ specs := make([]ast.Spec, 0, len(value.Decl.Specs))
+ var typ ast.Expr
+ for _, spec := range value.Decl.Specs {
+ vspec := spec.(*ast.ValueSpec)
+
+ // The type name may carry over from a previous specification in the
+ // case of constants and iota.
+ if vspec.Type != nil {
+ typ = vspec.Type
+ }
+
+ for _, ident := range vspec.Names {
+ if showSrc || isExported(ident.Name) {
+ if vspec.Type == nil && vspec.Values == nil && typ != nil {
+ // This is a standalone identifier, as in the case of iota usage.
+ // Thus, assume the type comes from the previous type.
+ vspec.Type = &ast.Ident{
+ Name: pkg.oneLineNode(typ),
+ NamePos: vspec.End() - 1,
+ }
+ }
+
+ specs = append(specs, vspec)
+ typ = nil // Only inject type on first exported identifier
+ break
+ }
+ }
+ }
+ if len(specs) == 0 {
+ return
+ }
+ value.Decl.Specs = specs
+ pkg.emit(value.Doc, value.Decl)
+ printed[value.Decl] = true
+}
+
+// typeDoc prints the docs for a type, including constructors and other items
+// related to it.
+func (pkg *Package) typeDoc(typ *doc.Type) {
+ decl := typ.Decl
+ spec := pkg.findTypeSpec(decl, typ.Name)
+ trimUnexportedElems(spec)
+ // If there are multiple types defined, reduce to just this one.
+ if len(decl.Specs) > 1 {
+ decl.Specs = []ast.Spec{spec}
+ }
+ pkg.emit(typ.Doc, decl)
+ pkg.newlines(2)
+ // Show associated methods, constants, etc.
+ if showAll {
+ printed := make(map[*ast.GenDecl]bool)
+ // We can use append here to print consts, then vars. Ditto for funcs and methods.
+ values := typ.Consts
+ values = append(values, typ.Vars...)
+ for _, value := range values {
+ for _, name := range value.Names {
+ if isExported(name) {
+ pkg.valueDoc(value, printed)
+ break
+ }
+ }
+ }
+ funcs := typ.Funcs
+ funcs = append(funcs, typ.Methods...)
+ for _, fun := range funcs {
+ if isExported(fun.Name) {
+ pkg.emit(fun.Doc, fun.Decl)
+ }
+ }
+ } else {
+ pkg.valueSummary(typ.Consts, true)
+ pkg.valueSummary(typ.Vars, true)
+ pkg.funcSummary(typ.Funcs, true)
+ pkg.funcSummary(typ.Methods, true)
+ }
+}
+
// trimUnexportedElems modifies spec in place to elide unexported fields from
-// structs and methods from interfaces (unless the unexported flag is set).
+// structs and methods from interfaces (unless the unexported flag is set or we
+// are asked to show the original source).
func trimUnexportedElems(spec *ast.TypeSpec) {
- if unexported {
+ if unexported || showSrc {
return
}
switch typ := spec.Type.(type) {
@@ -808,7 +927,6 @@ func (pkg *Package) printMethodDoc(symbol, method string) bool {
for _, meth := range typ.Methods {
if match(method, meth.Name) {
decl := meth.Decl
- decl.Body = nil
pkg.emit(meth.Doc, decl)
found = true
}
diff --git a/src/cmd/doc/testdata/pkg.go b/src/cmd/doc/testdata/pkg.go
index bc069939f8..50105b5fcc 100644
--- a/src/cmd/doc/testdata/pkg.go
+++ b/src/cmd/doc/testdata/pkg.go
@@ -5,6 +5,8 @@
// Package comment.
package pkg
+import "io"
+
// Constants
// Comment about exported constant.
@@ -52,7 +54,9 @@ var (
)
// Comment about exported function.
-func ExportedFunc(a int) bool
+func ExportedFunc(a int) bool {
+ return true != false
+}
// Comment about internal function.
func internalFunc(a int) bool
@@ -73,7 +77,7 @@ type ExportedType struct {
// Comment about exported method.
func (ExportedType) ExportedMethod(a int) bool {
- return true
+ return true != true
}
// Comment about unexported method.
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 9528ca2984..7866b39793 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -342,12 +342,21 @@
// cd go/src/encoding/json; go doc decode
//
// Flags:
+// -all
+// Show all the documentation for the package.
// -c
// Respect case when matching symbols.
// -cmd
// Treat a command (package main) like a regular package.
// Otherwise package main's exported symbols are hidden
// when showing the package's top-level documentation.
+// -src
+// Show the full source code for the symbol. This will
+// display the full Go source of its declaration and
+// definition, such as a function definition (including
+// the body), type declaration or enclosing const
+// block. The output may therefore include unexported
+// details.
// -u
// Show documentation for unexported as well as exported
// symbols, methods, and fields.
@@ -2652,6 +2661,8 @@
// Run enough iterations of each benchmark to take t, specified
// as a time.Duration (for example, -benchtime 1h30s).
// The default is 1 second (1s).
+// The special syntax Nx means to run the benchmark N times
+// (for example, -benchtime 100x).
//
// -count n
// Run each test and benchmark n times (default 1).
diff --git a/src/cmd/go/internal/base/signal_unix.go b/src/cmd/go/internal/base/signal_unix.go
index 38490b571b..c109eecadb 100644
--- a/src/cmd/go/internal/base/signal_unix.go
+++ b/src/cmd/go/internal/base/signal_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js linux nacl netbsd openbsd solaris
package base
diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go
index 4e7dca082d..bad05ff912 100644
--- a/src/cmd/go/internal/doc/doc.go
+++ b/src/cmd/go/internal/doc/doc.go
@@ -106,12 +106,21 @@ Examples:
cd go/src/encoding/json; go doc decode
Flags:
+ -all
+ Show all the documentation for the package.
-c
Respect case when matching symbols.
-cmd
Treat a command (package main) like a regular package.
Otherwise package main's exported symbols are hidden
when showing the package's top-level documentation.
+ -src
+ Show the full source code for the symbol. This will
+ display the full Go source of its declaration and
+ definition, such as a function definition (including
+ the body), type declaration or enclosing const
+ block. The output may therefore include unexported
+ details.
-u
Show documentation for unexported as well as exported
symbols, methods, and fields.
diff --git a/src/cmd/go/internal/imports/build.go b/src/cmd/go/internal/imports/build.go
index d1adf9440c..a67d2ebaae 100644
--- a/src/cmd/go/internal/imports/build.go
+++ b/src/cmd/go/internal/imports/build.go
@@ -207,5 +207,5 @@ func init() {
}
}
-const goosList = "android darwin dragonfly freebsd js linux nacl netbsd openbsd plan9 solaris windows zos "
+const goosList = "aix android darwin dragonfly freebsd js linux nacl netbsd openbsd plan9 solaris windows zos "
const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm "
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index d9696783a9..afb70a540c 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -446,6 +446,10 @@ const (
// this package, as part of a bigger load operation, and by GOPATH-based "go get".
// TODO(rsc): When GOPATH-based "go get" is removed, unexport this function.
func LoadImport(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package {
+ if path == "" {
+ panic("LoadImport called with empty package path")
+ }
+
stk.Push(path)
defer stk.Pop()
@@ -1756,6 +1760,9 @@ func LoadPackageNoFlags(arg string, stk *ImportStack) *Package {
// loadPackage accepts pseudo-paths beginning with cmd/ to denote commands
// in the Go command directory, as well as paths to those directories.
func loadPackage(arg string, stk *ImportStack) *Package {
+ if arg == "" {
+ panic("loadPackage called with empty package path")
+ }
if build.IsLocalImport(arg) {
dir := arg
if !filepath.IsAbs(dir) {
@@ -1854,6 +1861,9 @@ func PackagesAndErrors(patterns []string) []*Package {
for _, m := range matches {
for _, pkg := range m.Pkgs {
+ if pkg == "" {
+ panic(fmt.Sprintf("ImportPaths returned empty package for pattern %s", m.Pattern))
+ }
p := loadPackage(pkg, &stk)
p.Match = append(p.Match, m.Pattern)
p.Internal.CmdlinePkg = true
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index 87940a8f02..bcf8609826 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -694,6 +694,10 @@ func (r *gitRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser,
return nil, "", err
}
+ if err := ensureGitAttributes(r.dir); err != nil {
+ return nil, "", err
+ }
+
// Incredibly, git produces different archives depending on whether
// it is running on a Windows system or not, in an attempt to normalize
// text file line endings. Setting -c core.autocrlf=input means only
@@ -709,3 +713,43 @@ func (r *gitRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser,
return ioutil.NopCloser(bytes.NewReader(archive)), "", nil
}
+
+// ensureGitAttributes makes sure export-subst and export-ignore features are
+// disabled for this repo. This is intended to run before git archive, so
+// that the generated zip files have consistent ziphashes for a given
+// revision, independent of variables such as git version and the size of
+// the repo.
+//
+// See: https://github.com/golang/go/issues/27153
+func ensureGitAttributes(repoDir string) (err error) {
+ const attr = "\n* -export-subst -export-ignore\n"
+
+ d := repoDir + "/info"
+ p := d + "/attributes"
+
+ if err := os.MkdirAll(d, 0755); err != nil {
+ return err
+ }
+
+ f, err := os.OpenFile(p, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ closeErr := f.Close()
+ if closeErr != nil {
+ err = closeErr
+ }
+ }()
+
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ if !bytes.HasSuffix(b, []byte(attr)) {
+ _, err := f.WriteString(attr)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 90a5bd8130..54a2b724d4 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -247,7 +247,7 @@ func runGet(cmd *base.Command, args []string) {
// Deciding which module to upgrade/downgrade for a particular argument is difficult.
// Patterns only make it more difficult.
// We impose restrictions to avoid needing to interlace pattern expansion,
- // like in in modload.ImportPaths.
+ // like in modload.ImportPaths.
// Specifically, these patterns are supported:
//
// - Relative paths like ../../foo or ../../foo... are restricted to matching directories
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index acee4a91e7..a506c25dc7 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -30,6 +30,9 @@ func isStandardImportPath(path string) bool {
}
func findStandardImportPath(path string) string {
+ if path == "" {
+ panic("findStandardImportPath called with empty path")
+ }
if search.IsStandardImportPath(path) {
if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
return filepath.Join(cfg.GOROOT, "src", path)
@@ -236,13 +239,10 @@ func ModInfoProg(info string) []byte {
// Populate it in an init func so that it will work with go:linkname,
// but use a string constant instead of the name 'string' in case
// package main shadows the built-in 'string' with some local declaration.
- return []byte(fmt.Sprintf(`
- package main
- import _ "unsafe"
- //go:linkname __debug_modinfo__ runtime/debug.modinfo
- var __debug_modinfo__ = ""
- func init() {
- __debug_modinfo__ = %q
- }
+ return []byte(fmt.Sprintf(`package main
+import _ "unsafe"
+//go:linkname __debug_modinfo__ runtime/debug.modinfo
+var __debug_modinfo__ = ""
+func init() { __debug_modinfo__ = %q }
`, string(infoStart)+info+string(infoEnd)))
}
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index 6c1525da9a..3b8c0b6435 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -397,6 +397,9 @@ func ModuleUsedDirectly(path string) bool {
// Lookup requires that one of the Load functions in this package has already
// been called.
func Lookup(path string) (dir, realPath string, err error) {
+ if path == "" {
+ panic("Lookup called with empty package path")
+ }
pkg, ok := loaded.pkgCache.Get(path).(*loadPkg)
if !ok {
// The loader should have found all the relevant paths.
diff --git a/src/cmd/go/internal/semver/semver.go b/src/cmd/go/internal/semver/semver.go
index 4af7118e55..d61c6b476a 100644
--- a/src/cmd/go/internal/semver/semver.go
+++ b/src/cmd/go/internal/semver/semver.go
@@ -312,9 +312,8 @@ func compareInt(x, y string) int {
}
if x < y {
return -1
- } else {
- return +1
}
+ return +1
}
func comparePrerelease(x, y string) int {
@@ -353,9 +352,8 @@ func comparePrerelease(x, y string) int {
if ix != iy {
if ix {
return -1
- } else {
- return +1
}
+ return +1
}
if ix {
if len(dx) < len(dy) {
@@ -367,16 +365,14 @@ func comparePrerelease(x, y string) int {
}
if dx < dy {
return -1
- } else {
- return +1
}
+ return +1
}
}
if x == "" {
return -1
- } else {
- return +1
}
+ return +1
}
func nextIdent(x string) (dx, rest string) {
diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go
index 3295e8ffe2..70deea3643 100644
--- a/src/cmd/go/internal/test/test.go
+++ b/src/cmd/go/internal/test/test.go
@@ -212,6 +212,8 @@ const testFlag2 = `
Run enough iterations of each benchmark to take t, specified
as a time.Duration (for example, -benchtime 1h30s).
The default is 1 second (1s).
+ The special syntax Nx means to run the benchmark N times
+ (for example, -benchtime 100x).
-count n
Run each test and benchmark n times (default 1).
diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go
index 50eac425ec..cfa4352cb9 100644
--- a/src/cmd/go/internal/vet/vetflag.go
+++ b/src/cmd/go/internal/vet/vetflag.go
@@ -5,9 +5,14 @@
package vet
import (
+ "bytes"
+ "encoding/json"
"flag"
"fmt"
+ "log"
"os"
+ "os/exec"
+ "path/filepath"
"strings"
"cmd/go/internal/base"
@@ -16,72 +21,94 @@ import (
"cmd/go/internal/work"
)
-const cmd = "vet"
+// go vet flag processing
+//
+// We query the flags of the tool specified by GOVETTOOL (default:
+// cmd/vet) and accept any of those flags plus any flag valid for 'go
+// build'. The tool must support -flags, which prints a description of
+// its flags in JSON to stdout.
-// vetFlagDefn is the set of flags we process.
-var vetFlagDefn = []*cmdflag.Defn{
- // Note: Some flags, in particular -tags and -v, are known to
- // vet but also defined as build flags. This works fine, so we
- // don't define them here but use AddBuildFlags to init them.
- // However some, like -x, are known to the build but not
- // to vet. We handle them in vetFlags.
+// GOVETTOOL specifies the vet command to run.
+// This must be an environment variable because
+// we need it before flag processing, as we execute
+// $GOVETTOOL to discover the set of flags it supports.
+//
+// Using an environment variable also makes it easy for users to opt in
+// to (and later, opt out of) the new cmd/vet analysis driver during the
+// transition. It is also used by tests.
+var vetTool = os.Getenv("GOVETTOOL")
- // local.
- {Name: "all", BoolVar: new(bool), PassToTest: true},
- {Name: "asmdecl", BoolVar: new(bool), PassToTest: true},
- {Name: "assign", BoolVar: new(bool), PassToTest: true},
- {Name: "atomic", BoolVar: new(bool), PassToTest: true},
- {Name: "bool", BoolVar: new(bool), PassToTest: true},
- {Name: "buildtags", BoolVar: new(bool), PassToTest: true},
- {Name: "cgocall", BoolVar: new(bool), PassToTest: true},
- {Name: "composites", BoolVar: new(bool), PassToTest: true},
- {Name: "copylocks", BoolVar: new(bool), PassToTest: true},
- {Name: "httpresponse", BoolVar: new(bool), PassToTest: true},
- {Name: "lostcancel", BoolVar: new(bool), PassToTest: true},
- {Name: "methods", BoolVar: new(bool), PassToTest: true},
- {Name: "nilfunc", BoolVar: new(bool), PassToTest: true},
- {Name: "printf", BoolVar: new(bool), PassToTest: true},
- {Name: "printfuncs", PassToTest: true},
- {Name: "rangeloops", BoolVar: new(bool), PassToTest: true},
- {Name: "shadow", BoolVar: new(bool), PassToTest: true},
- {Name: "shadowstrict", BoolVar: new(bool), PassToTest: true},
- {Name: "shift", BoolVar: new(bool), PassToTest: true},
- {Name: "source", BoolVar: new(bool), PassToTest: true},
- {Name: "structtags", BoolVar: new(bool), PassToTest: true},
- {Name: "tests", BoolVar: new(bool), PassToTest: true},
- {Name: "unreachable", BoolVar: new(bool), PassToTest: true},
- {Name: "unsafeptr", BoolVar: new(bool), PassToTest: true},
- {Name: "unusedfuncs", PassToTest: true},
- {Name: "unusedresult", BoolVar: new(bool), PassToTest: true},
- {Name: "unusedstringmethods", PassToTest: true},
-}
+// vetFlags processes the command line, splitting it at the first non-flag
+// into the list of flags and list of packages.
+func vetFlags(args []string) (passToVet, packageNames []string) {
+ // Query the vet command for its flags.
+ tool := vetTool
+ if tool != "" {
+ var err error
+ tool, err = filepath.Abs(tool)
+ if err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ tool = base.Tool("vet")
+ }
+ out := new(bytes.Buffer)
+ vetcmd := exec.Command(tool, "-flags")
+ vetcmd.Stdout = out
+ if err := vetcmd.Run(); err != nil {
+ fmt.Fprintf(os.Stderr, "go vet: can't execute %s -flags: %v\n", tool, err)
+ os.Exit(2)
+ }
+ var analysisFlags []struct {
+ Name string
+ Bool bool
+ Usage string
+ }
+ if err := json.Unmarshal(out.Bytes(), &analysisFlags); err != nil {
+ fmt.Fprintf(os.Stderr, "go vet: can't unmarshal JSON from %s -flags: %v", tool, err)
+ os.Exit(2)
+ }
-var vetTool string
+ // Add vet's flags to vetflagDefn.
+ //
+ // Some flags, in particular -tags and -v, are known to vet but
+ // also defined as build flags. This works fine, so we don't
+ // define them here but use AddBuildFlags to init them.
+ // However some, like -x, are known to the build but not to vet.
+ var vetFlagDefn []*cmdflag.Defn
+ for _, f := range analysisFlags {
+ switch f.Name {
+ case "tags", "v":
+ continue
+ }
+ defn := &cmdflag.Defn{
+ Name: f.Name,
+ PassToTest: true,
+ }
+ if f.Bool {
+ defn.BoolVar = new(bool)
+ }
+ vetFlagDefn = append(vetFlagDefn, defn)
+ }
-// add build flags to vetFlagDefn.
-func init() {
- cmdflag.AddKnownFlags("vet", vetFlagDefn)
+ // Add build flags to vetFlagDefn.
var cmd base.Command
work.AddBuildFlags(&cmd)
- cmd.Flag.StringVar(&vetTool, "vettool", "", "path to vet tool binary") // for cmd/vet tests; undocumented for now
cmd.Flag.VisitAll(func(f *flag.Flag) {
vetFlagDefn = append(vetFlagDefn, &cmdflag.Defn{
Name: f.Name,
Value: f.Value,
})
})
-}
-// vetFlags processes the command line, splitting it at the first non-flag
-// into the list of flags and list of packages.
-func vetFlags(args []string) (passToVet, packageNames []string) {
+ // Process args.
args = str.StringList(cmdflag.FindGOFLAGS(vetFlagDefn), args)
for i := 0; i < len(args); i++ {
if !strings.HasPrefix(args[i], "-") {
return args[:i], args[i:]
}
- f, value, extraWord := cmdflag.Parse(cmd, vetFlagDefn, args, i)
+ f, value, extraWord := cmdflag.Parse("vet", vetFlagDefn, args, i)
if f == nil {
fmt.Fprintf(os.Stderr, "vet: flag %q not defined\n", args[i])
fmt.Fprintf(os.Stderr, "Run \"go help vet\" for more information\n")
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
index 8b97e8b75b..af3183ae9a 100644
--- a/src/cmd/go/internal/work/buildid.go
+++ b/src/cmd/go/internal/work/buildid.go
@@ -178,7 +178,7 @@ func (b *Builder) toolID(name string) string {
path := base.Tool(name)
desc := "go tool " + name
- // Special case: undocumented -vettool overrides usual vet, for testing vet.
+ // Special case: undocumented $GOVETTOOL overrides usual vet, for testing vet.
if name == "vet" && VetTool != "" {
path = VetTool
desc = VetTool
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 158f5f3b17..6ae263431c 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -1648,6 +1648,7 @@ func (b *Builder) cover(a *Action, dst, src string, varName string) error {
var objectMagic = [][]byte{
{'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive
+ {'<', 'b', 'i', 'g', 'a', 'f', '>', '\n'}, // Package AIX big archive
{'\x7F', 'E', 'L', 'F'}, // ELF
{0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit
{0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit
@@ -1658,6 +1659,8 @@ var objectMagic = [][]byte{
{0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64
{0x00, 0x00, 0x06, 0x47}, // Plan 9 arm
{0x00, 0x61, 0x73, 0x6D}, // WASM
+ {0x01, 0xDF}, // XCOFF 32-bit
+ {0x01, 0xF7}, // XCOFF 64-bit
}
func isObject(s string) bool {
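
The two XCOFF magic numbers extend the same prefix test the other object formats use; a standalone sketch of that check:

    package main

    import (
        "bytes"
        "fmt"
    )

    var magics = [][]byte{
        {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // archive
        {0x01, 0xDF},                              // XCOFF 32-bit
        {0x01, 0xF7},                              // XCOFF 64-bit
    }

    // hasObjectMagic reports whether prefix, read from the start of a file,
    // begins with any known object-file magic number.
    func hasObjectMagic(prefix []byte) bool {
        for _, m := range magics {
            if bytes.HasPrefix(prefix, m) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(hasObjectMagic([]byte{0x01, 0xDF, 0x00})) // true
    }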
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 31c6ede2a5..8708dacd41 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -614,7 +614,7 @@ func scriptMatch(ts *testScript, neg bool, args []string, text, name string) {
want = 2
}
if len(args) != want {
- ts.fatalf("usage: %s [-count=N] 'pattern' file%s", name, extraUsage)
+ ts.fatalf("usage: %s [-count=N] 'pattern'%s", name, extraUsage)
}
pattern := args[0]
diff --git a/src/cmd/go/testdata/script/mod_git_export_subst.txt b/src/cmd/go/testdata/script/mod_git_export_subst.txt
new file mode 100644
index 0000000000..2b8e2bc7bc
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_git_export_subst.txt
@@ -0,0 +1,21 @@
+env GO111MODULE=on
+env GOPROXY=
+
+# Testing that git export-subst is disabled
+[!net] skip
+[!exec:git] skip
+go build
+
+-- x.go --
+package x
+
+import _ "github.com/jasonkeene/export-subst"
+
+-- go.mod --
+module x
+
+require github.com/jasonkeene/export-subst v0.0.0-20180927204031-5845945ec626
+
+-- go.sum --
+github.com/jasonkeene/export-subst v0.0.0-20180927204031-5845945ec626 h1:AUkXi/xFnm7lH2pgtvVkGb7buRn1ywFHw+xDpZ29Rz0=
+github.com/jasonkeene/export-subst v0.0.0-20180927204031-5845945ec626/go.mod h1:DwJXqVtrgrQkv3Giuf2Jh4YyubVe7y41S1eOIaysTJw=
diff --git a/src/cmd/go/testdata/testterminal18153/terminal_test.go b/src/cmd/go/testdata/testterminal18153/terminal_test.go
index d662e55ee5..71493efe98 100644
--- a/src/cmd/go/testdata/testterminal18153/terminal_test.go
+++ b/src/cmd/go/testdata/testterminal18153/terminal_test.go
@@ -5,7 +5,7 @@
// +build linux
// This test is run by src/cmd/dist/test.go (cmd_go_test_terminal),
-// and not by cmd/go's tests. This is because this test requires that
+// and not by cmd/go's tests. This is because this test requires
// that it be called with its stdout and stderr being a terminal.
// dist doesn't run `cmd/go test` against this test directory if
// dist's stdout/stderr aren't terminals.
diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go
index 65647c37ae..c4c75e41d4 100644
--- a/src/cmd/internal/obj/arm64/a.out.go
+++ b/src/cmd/internal/obj/arm64/a.out.go
@@ -407,10 +407,12 @@ const (
C_ABCON0 // could be C_ADDCON0 or C_BITCON
C_ADDCON0 // 12-bit unsigned, unshifted
C_ABCON // could be C_ADDCON or C_BITCON
+ C_AMCON // could be C_ADDCON or C_MOVCON
C_ADDCON // 12-bit unsigned, shifted left by 0 or 12
C_MBCON // could be C_MOVCON or C_BITCON
C_MOVCON // generated by a 16-bit constant, optionally inverted and/or shifted by multiple of 16
C_BITCON // bitfield and logical immediate masks
+ C_ADDCON2 // 24-bit constant
C_LCON // 32-bit constant
C_VCON // 64-bit constant
C_FCON // floating-point constant
diff --git a/src/cmd/internal/obj/arm64/anames7.go b/src/cmd/internal/obj/arm64/anames7.go
index 92f0cec942..f8fdc68c1e 100644
--- a/src/cmd/internal/obj/arm64/anames7.go
+++ b/src/cmd/internal/obj/arm64/anames7.go
@@ -23,10 +23,12 @@ var cnames7 = []string{
"ABCON0",
"ADDCON0",
"ABCON",
+ "AMCON",
"ADDCON",
"MBCON",
"MOVCON",
"BITCON",
+ "ADDCON2",
"LCON",
"VCON",
"FCON",
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 46fdcdcf7d..6a6e81807a 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -164,6 +164,10 @@ func OPBIT(x uint32) uint32 {
return 1<<30 | 0<<29 | 0xD6<<21 | 0<<16 | x<<10
}
+func MOVCONST(d int64, s int, rt int) uint32 {
+ return uint32(((d>>uint(s*16))&0xFFFF)<<5) | uint32(s&3)<<21 | uint32(rt&31)
+}
+
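MOVCONST packs one step of a MOVZ/MOVN/MOVK sequence: the selected 16-bit chunk of the constant goes in bits 5..20, the chunk index ("hw" field) in bits 21..22, and the destination register in bits 0..4. A standalone copy, runnable as a sanity check (opcode bits are omitted; only the payload is shown):

package main

import "fmt"

// movconst mirrors MOVCONST above: select the s-th 16-bit chunk of d and
// pack it with the chunk index and destination register number.
func movconst(d int64, s, rt int) uint32 {
	return uint32(((d>>uint(s*16))&0xFFFF)<<5) | uint32(s&3)<<21 | uint32(rt&31)
}

func main() {
	// Payload for moving 0x00420000 into R3: chunk 1 holds 0x0042.
	fmt.Printf("%#x\n", movconst(0x00420000, 1, 3)) // 0x200843
}
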
const (
LFROM = 1 << 0
LTO = 1 << 1
@@ -192,6 +196,8 @@ var optab = []Optab{
{AADD, C_BITCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0},
{AADD, C_BITCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0},
{ACMP, C_BITCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0},
+ {AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, 48, 8, 0, 0, 0},
+ {AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, 48, 8, 0, 0, 0},
{AADD, C_VCON, C_RSP, C_NONE, C_RSP, 13, 8, 0, LFROM, 0},
{AADD, C_VCON, C_NONE, C_NONE, C_RSP, 13, 8, 0, LFROM, 0},
{ACMP, C_VCON, C_REG, C_NONE, C_NONE, 13, 8, 0, LFROM, 0},
@@ -270,12 +276,10 @@ var optab = []Optab{
/* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */
{AMOVW, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
{AMOVD, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
-
- // TODO: these don't work properly.
- // { AMOVW, C_ADDCON, C_NONE, C_REG, 2, 4, 0 , 0},
- // { AMOVD, C_ADDCON, C_NONE, C_REG, 2, 4, 0 , 0},
{AMOVW, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
{AMOVD, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
+ {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
+ {AMOVD, C_VCON, C_NONE, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
{AMOVK, C_VCON, C_NONE, C_NONE, C_REG, 33, 4, 0, 0, 0},
{AMOVD, C_AACON, C_NONE, C_NONE, C_REG, 4, 4, REGFROM, 0, 0},
@@ -316,9 +320,7 @@ var optab = []Optab{
{AWORD, C_NONE, C_NONE, C_NONE, C_LCON, 14, 4, 0, 0, 0},
{AWORD, C_NONE, C_NONE, C_NONE, C_LEXT, 14, 4, 0, 0, 0},
{AWORD, C_NONE, C_NONE, C_NONE, C_ADDR, 14, 4, 0, 0, 0},
- {AMOVW, C_VCON, C_NONE, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
{AMOVW, C_VCONADDR, C_NONE, C_NONE, C_REG, 68, 8, 0, 0, 0},
- {AMOVD, C_VCON, C_NONE, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
{AMOVD, C_VCONADDR, C_NONE, C_NONE, C_REG, 68, 8, 0, 0, 0},
{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
{AMOVBU, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
@@ -1046,6 +1048,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) {
C_NOREG4K,
C_LOREG,
C_LACON,
+ C_ADDCON2,
C_LCON,
C_VCON:
if a.Name == obj.NAME_EXTERN {
@@ -1102,6 +1105,23 @@ func isSTXPop(op obj.As) bool {
return false
}
+func isANDWop(op obj.As) bool {
+ switch op {
+ case AANDW, AORRW, AEORW, AANDSW, ATSTW,
+ ABICW, AEONW, AORNW, ABICSW:
+ return true
+ }
+ return false
+}
+
+func isADDWop(op obj.As) bool {
+ switch op {
+ case AADDW, AADDSW, ASUBW, ASUBSW, ACMNW, ACMPW:
+ return true
+ }
+ return false
+}
+
func isRegShiftOrExt(a *obj.Addr) bool {
return (a.Index-obj.RBaseARM64)&REG_EXT != 0 || (a.Index-obj.RBaseARM64)&REG_LSL != 0
}
@@ -1408,6 +1428,52 @@ func rclass(r int16) int {
return C_GOK
}
+// con32class reclassifies the constant of a 32-bit instruction. Because the constant is 32-bit
+// but is saved in Offset, whose type is int64, con32class treats it as a uint32 and reclassifies it.
+func (c *ctxt7) con32class(a *obj.Addr) int {
+ v := uint32(a.Offset)
+ if v == 0 {
+ return C_ZCON
+ }
+ if isaddcon(int64(v)) {
+ if v <= 0xFFF {
+ if isbitcon(uint64(v)) {
+ return C_ABCON0
+ }
+ return C_ADDCON0
+ }
+ if isbitcon(uint64(v)) {
+ return C_ABCON
+ }
+ return C_ADDCON
+ }
+
+ t := movcon(int64(v))
+ if t >= 0 {
+ if isbitcon(uint64(v)) {
+ return C_MBCON
+ }
+ return C_MOVCON
+ }
+
+ t = movcon(int64(^v))
+ if t >= 0 {
+ if isbitcon(uint64(v)) {
+ return C_MBCON
+ }
+ return C_MOVCON
+ }
+
+ if isbitcon(uint64(v)) {
+ return C_BITCON
+ }
+
+ if 0 <= v && v <= 0xffffff {
+ return C_ADDCON2
+ }
+ return C_LCON
+}
+
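One consequence of con32class worth making concrete: a constant in the 0..0xffffff range that is not an add-immediate, a move-wide immediate, or a bitmask immediate becomes C_ADDCON2 and is later assembled as two 12-bit ADD immediates (case 48 below). A sketch of that split:

package main

import "fmt"

func main() {
	// A C_ADDCON2 constant splits into two 12-bit immediates, matching
	// the two oaddi calls in case 48: the low 12 bits, then the next 12.
	const v = 0xABC123
	fmt.Printf("ADD $%#x; ADD $%#x\n", v&0x000fff, v&0xfff000)
	// ADD $0x123; ADD $0xabc000
}
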
func (c *ctxt7) aclass(a *obj.Addr) int {
switch a.Type {
case obj.TYPE_NONE:
@@ -1514,6 +1580,12 @@ func (c *ctxt7) aclass(a *obj.Addr) int {
if isbitcon(uint64(v)) {
return C_ABCON
}
+ if movcon(v) >= 0 {
+ return C_AMCON
+ }
+ if movcon(^v) >= 0 {
+ return C_AMCON
+ }
return C_ADDCON
}
@@ -1537,6 +1609,10 @@ func (c *ctxt7) aclass(a *obj.Addr) int {
return C_BITCON
}
+ if 0 <= v && v <= 0xffffff {
+ return C_ADDCON2
+ }
+
if uint64(v) == uint64(uint32(v)) || v == int64(int32(v)) {
return C_LCON
}
@@ -1595,8 +1671,41 @@ func (c *ctxt7) oplook(p *obj.Prog) *Optab {
}
a1 = int(p.From.Class)
if a1 == 0 {
- a1 = c.aclass(&p.From) + 1
+ a0 := c.aclass(&p.From)
+ // do not break C_ADDCON2 when S bit is set
+ if (p.As == AADDS || p.As == AADDSW || p.As == ASUBS || p.As == ASUBSW) && a0 == C_ADDCON2 {
+ a0 = C_LCON
+ }
+ a1 = a0 + 1
p.From.Class = int8(a1)
+ // more specific classification of 32-bit integers
+ if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE {
+ if p.As == AMOVW || isADDWop(p.As) {
+ ra0 := c.con32class(&p.From)
+ // do not break C_ADDCON2 when S bit is set
+ if (p.As == AADDSW || p.As == ASUBSW) && ra0 == C_ADDCON2 {
+ ra0 = C_LCON
+ }
+ a1 = ra0 + 1
+ p.From.Class = int8(a1)
+ }
+ if isANDWop(p.As) {
+ switch p.As {
+ case AANDW, AORRW, AEORW, AANDSW, ATSTW:
+ // For 32-bit logical instruction with constant,
+ // rewrite the high 32-bit to be a copy of the low
+ // 32-bit, so that the BITCON test can be shared
+ // for both 32-bit and 64-bit.
+ if a0 == C_BITCON {
+ break
+ }
+ fallthrough
+ default:
+ a1 = c.con32class(&p.From) + 1
+ p.From.Class = int8(a1)
+ }
+ }
+ }
}
a1--
@@ -1667,7 +1776,7 @@ func cmp(a int, b int) bool {
}
case C_ADDCON:
- if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON {
+ if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON || b == C_AMCON {
return true
}
@@ -1677,12 +1786,17 @@ func cmp(a int, b int) bool {
}
case C_MOVCON:
- if b == C_MBCON || b == C_ZCON || b == C_ADDCON0 {
+ if b == C_MBCON || b == C_ZCON || b == C_ADDCON0 || b == C_AMCON {
+ return true
+ }
+
+ case C_ADDCON2:
+ if b == C_ZCON || b == C_ADDCON || b == C_ADDCON0 {
return true
}
case C_LCON:
- if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON {
+ if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON || b == C_ADDCON2 || b == C_AMCON {
return true
}
@@ -3474,6 +3588,19 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
}
o1 |= 0x1c1<<21 | uint32(rs&31)<<16 | uint32(rb&31)<<5 | uint32(rt&31)
+ case 48: /* ADD $C_ADDCON2, Rm, Rd */
+ op := c.opirr(p, p.As)
+ if op&Sbit != 0 {
+ c.ctxt.Diag("can not break addition/subtraction when S bit is set", p)
+ }
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
+ if r == 0 {
+ r = rt
+ }
+ o1 = c.oaddi(p, int32(op), int32(c.regoff(&p.From))&0x000fff, r, rt)
+ o2 = c.oaddi(p, int32(op), int32(c.regoff(&p.From))&0xfff000, rt, rt)
+
case 50: /* sys/sysl */
o1 = c.opirr(p, p.As)
@@ -3640,7 +3767,11 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
if p.Reg == REGTMP {
c.ctxt.Diag("cannot use REGTMP as source: %v\n", p)
}
- o1 = c.omovconst(AMOVD, p, &p.From, REGTMP)
+ if isADDWop(p.As) || isANDWop(p.As) {
+ o1 = c.omovconst(AMOVW, p, &p.From, REGTMP)
+ } else {
+ o1 = c.omovconst(AMOVD, p, &p.From, REGTMP)
+ }
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
@@ -6088,7 +6219,7 @@ func (c *ctxt7) oaddi(p *obj.Prog, o1 int32, v int32, r int, rt int) uint32 {
}
/*
- * load a a literal value into dr
+ * load a literal value into dr
*/
func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 {
var o1 int32
@@ -6165,31 +6296,37 @@ func (c *ctxt7) omovconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint3
return o1
}
- r := 32
- if as == AMOVD {
- r = 64
- }
- d := a.Offset
- s := movcon(d)
- if s < 0 || s >= r {
- d = ^d
- s = movcon(d)
- if s < 0 || s >= r {
- c.ctxt.Diag("impossible move wide: %#x\n%v", uint64(a.Offset), p)
- }
- if as == AMOVD {
- o1 = c.opirr(p, AMOVN)
- } else {
+ if as == AMOVW {
+ d := uint32(a.Offset)
+ s := movcon(int64(d))
+ if s < 0 || 16*s >= 32 {
+ d = ^d
+ s = movcon(int64(d))
+ if s < 0 || 16*s >= 32 {
+ c.ctxt.Diag("impossible 32-bit move wide: %#x\n%v", uint32(a.Offset), p)
+ }
o1 = c.opirr(p, AMOVNW)
- }
- } else {
- if as == AMOVD {
- o1 = c.opirr(p, AMOVZ)
} else {
o1 = c.opirr(p, AMOVZW)
}
+ o1 |= MOVCONST(int64(d), s, rt)
}
- o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
+ if as == AMOVD {
+ d := a.Offset
+ s := movcon(d)
+ if s < 0 || 16*s >= 64 {
+ d = ^d
+ s = movcon(d)
+ if s < 0 || 16*s >= 64 {
+ c.ctxt.Diag("impossible 64-bit move wide: %#x\n%v", uint64(a.Offset), p)
+ }
+ o1 = c.opirr(p, AMOVN)
+ } else {
+ o1 = c.opirr(p, AMOVZ)
+ }
+ o1 |= MOVCONST(d, s, rt)
+ }
+
return o1
}
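
The restructured omovconst chooses MOVZ when the constant itself fits one aligned 16-bit chunk and MOVN when its complement does. A self-contained sketch of that decision for 64-bit constants (oneChunk is illustrative, not the assembler's movcon):

package main

import "fmt"

// oneChunk reports whether all set bits of v fall within a single aligned
// 16-bit chunk, the condition under which one MOVZ can materialize v.
func oneChunk(v uint64) bool {
	for s := uint(0); s < 4; s++ {
		if v&^(uint64(0xFFFF)<<(16*s)) == 0 {
			return true
		}
	}
	return false
}

func main() {
	for _, d := range []uint64{0x420000, ^uint64(0x420000), 0x12345678} {
		switch {
		case oneChunk(d):
			fmt.Printf("%#x: MOVZ\n", d)
		case oneChunk(^d):
			fmt.Printf("%#x: MOVN\n", d)
		default:
			fmt.Printf("%#x: literal pool or MOVK sequence\n", d)
		}
	}
}
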
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 354bda5e48..f983d5277e 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -344,6 +344,7 @@ const (
AFUNCDATA
AJMP
ANOP
+ APCALIGN
APCDATA
ARET
AGETCALLERPC
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 756170bc55..a36565c9fd 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -35,6 +35,7 @@ import (
"encoding/binary"
"fmt"
"log"
+ "math"
"sort"
)
@@ -342,6 +343,8 @@ var optab = []Optab{
{AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
+ {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
+ {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
{AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
{AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
@@ -599,6 +602,7 @@ var optab = []Optab{
{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
+ {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
}
@@ -607,6 +611,28 @@ var oprange [ALAST & obj.AMask][]Optab
var xcmp [C_NCLASS][C_NCLASS]bool
+// addpad returns the number of padding bytes needed to align code as requested.
+func addpad(pc, a int64, ctxt *obj.Link) int {
+ switch a {
+ case 8:
+ if pc%8 != 0 {
+ return 4
+ }
+ case 16:
+ switch pc % 16 {
+ // When currently aligned to 4, avoid 3 NOPs and settle for
+ // 8-byte alignment, which should still help.
+ case 4, 12:
+ return 4
+ case 8:
+ return 8
+ }
+ default:
+ ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
+ }
+ return 0
+}
+
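addpad's policy is easiest to see by example: for PCALIGN $16, a pc at 12 mod 16 reaches full alignment with one NOP, a pc at 4 mod 16 settles for 8-byte alignment rather than spending three NOPs, and a pc at 8 mod 16 pays two. A runnable copy of the same switch (minus the Diag call):

package main

import "fmt"

// addpadSketch duplicates the alignment policy of addpad above.
func addpadSketch(pc, a int64) int {
	switch a {
	case 8:
		if pc%8 != 0 {
			return 4
		}
	case 16:
		switch pc % 16 {
		case 4, 12:
			return 4
		case 8:
			return 8
		}
	}
	return 0
}

func main() {
	for _, pc := range []int64{0, 4, 8, 12} {
		fmt.Printf("pc%%16=%-2d pad=%d\n", pc, addpadSketch(pc, 16))
	}
}
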
func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p := cursym.Func.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
@@ -629,12 +655,16 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
o = c.oplook(p)
m = int(o.size)
if m == 0 {
- if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
- c.ctxt.Diag("zero-width instruction\n%v", p)
+ if p.As == obj.APCALIGN {
+ a := c.vregoff(&p.From)
+ m = addpad(pc, a, ctxt)
+ } else {
+ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
}
- continue
}
-
pc += int64(m)
}
@@ -683,10 +713,15 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
m = int(o.size)
if m == 0 {
- if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
- c.ctxt.Diag("zero-width instruction\n%v", p)
+ if p.As == obj.APCALIGN {
+ a := c.vregoff(&p.From)
+ m = addpad(pc, a, ctxt)
+ } else {
+ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
}
- continue
}
pc += int64(m)
@@ -695,7 +730,10 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
c.cursym.Size = pc
}
- pc += -pc & (funcAlign - 1)
+ if pc%funcAlign != 0 {
+ pc += funcAlign - (pc % funcAlign)
+ }
+
c.cursym.Size = pc
/*
@@ -713,10 +751,19 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if int(o.size) > 4*len(out) {
log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
}
+ origsize := o.size
c.asmout(p, o, out[:])
- for i = 0; i < int32(o.size/4); i++ {
- c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
- bp = bp[4:]
+ if origsize == 0 && o.size > 0 {
+ for i = 0; i < int32(o.size/4); i++ {
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[0])
+ bp = bp[4:]
+ }
+ o.size = origsize
+ } else {
+ for i = 0; i < int32(o.size/4); i++ {
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
+ bp = bp[4:]
+ }
}
}
}
@@ -829,6 +876,18 @@ func (c *ctxt9) aclass(a *obj.Addr) int {
case obj.TYPE_TEXTSIZE:
return C_TEXTSIZE
+ case obj.TYPE_FCONST:
+ // The only cases where FCONST will occur are with float64 +/- 0.
+ // All other float constants are generated in memory.
+ f64 := a.Val.(float64)
+ if f64 == 0 {
+ if math.Signbit(f64) {
+ return C_ADDCON
+ }
+ return C_ZCON
+ }
+ log.Fatalf("Unexpected nonzero FCONST operand %v", a)
+
case obj.TYPE_CONST,
obj.TYPE_ADDR:
switch a.Name {
@@ -1882,6 +1941,7 @@ func buildop(ctxt *obj.Link) {
obj.ATEXT,
obj.AUNDEF,
obj.AFUNCDATA,
+ obj.APCALIGN,
obj.APCDATA,
obj.ADUFFZERO,
obj.ADUFFCOPY:
@@ -2290,6 +2350,19 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
prasm(p)
case 0: /* pseudo ops */
+ if p.As == obj.APCALIGN {
+ aln := c.vregoff(&p.From)
+ v := addpad(p.Pc, aln, c.ctxt)
+ if v > 0 {
+ for i := 0; i < 6; i++ {
+ out[i] = uint32(0)
+ }
+ o.size = int8(v)
+ out[0] = LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
+ return
+ }
+ o.size = 0
+ }
break
case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
@@ -2763,6 +2836,13 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
c.ctxt.Diag("%v is not supported", p)
}
+ case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
+ o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
+ // This is needed for -0.
+ if o.size == 8 {
+ o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
+ }
+
case 25:
/* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
v := c.regoff(&p.From)
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index f42d675805..7a07b5058c 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -67,10 +67,13 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
case AFMOVD:
if p.From.Type == obj.TYPE_FCONST {
f64 := p.From.Val.(float64)
- p.From.Type = obj.TYPE_MEM
- p.From.Sym = ctxt.Float64Sym(f64)
- p.From.Name = obj.NAME_EXTERN
- p.From.Offset = 0
+ // Constant not needed in memory for float +/- 0
+ if f64 != 0 {
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Float64Sym(f64)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
}
// Put >32-bit constants in memory and load them
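
The f64 != 0 guard covers both zeros because -0 compares equal to 0 in Go; it is math.Signbit, used by aclass above, that routes -0 to the FNEG in asmout case 24. A quick demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	negZero := math.Copysign(0, -1)
	fmt.Println(negZero == 0)          // true: -0 skips the memory constant too
	fmt.Println(math.Signbit(negZero)) // true: distinguishes -0 for FNEG
	fmt.Println(math.Signbit(0))       // false
}
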
diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go
index 3453b71b3b..da938c998a 100644
--- a/src/cmd/internal/obj/util.go
+++ b/src/cmd/internal/obj/util.go
@@ -386,7 +386,7 @@ func offConv(off int64) string {
// opSuffixSet is like regListSet, but for opcode suffixes.
//
// Unlike some other similar structures, uint8 space is not
-// divided by it's own values set (because the're only 256 of them).
+// divided by its own values set (because there are only 256 of them).
// Instead, every arch may interpret/format all 8 bits as they like,
// as long as they register proper cconv function for it.
type opSuffixSet struct {
@@ -535,6 +535,7 @@ var Anames = []string{
"FUNCDATA",
"JMP",
"NOP",
+ "PCALIGN",
"PCDATA",
"RET",
"GETCALLERPC",
diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go
index 8498b40724..b1eae2882b 100644
--- a/src/cmd/internal/obj/wasm/wasmobj.go
+++ b/src/cmd/internal/obj/wasm/wasmobj.go
@@ -705,11 +705,42 @@ func regAddr(reg int16) obj.Addr {
return obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
+// countRegisters returns the number of integer and float registers used by s.
+// It does so by looking for the maximum R* and F* registers.
+func countRegisters(s *obj.LSym) (numI, numF int16) {
+ for p := s.Func.Text; p != nil; p = p.Link {
+ var reg int16
+ switch p.As {
+ case AGet:
+ reg = p.From.Reg
+ case ASet:
+ reg = p.To.Reg
+ case ATee:
+ reg = p.To.Reg
+ default:
+ continue
+ }
+ if reg >= REG_R0 && reg <= REG_R15 {
+ if n := reg - REG_R0 + 1; numI < n {
+ numI = n
+ }
+ } else if reg >= REG_F0 && reg <= REG_F15 {
+ if n := reg - REG_F0 + 1; numF < n {
+ numF = n
+ }
+ }
+ }
+ return
+}
+
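countRegisters enables a compact local layout: i64 locals for R0..R(numI-1) come first, then f64 locals, so the Get/Set/Tee cases below compute a float register's local index as numI + (reg - REG_F0). A hedged sketch with illustrative numbers:

package main

import "fmt"

// localIndex sketches the layout implied above: integer registers occupy
// locals 0..numI-1 and float registers follow at numI..numI+numF-1.
func localIndex(numI, regOffset int, isFloat bool) int {
	if isFloat {
		return numI + regOffset
	}
	return regOffset
}

func main() {
	numI := 3                               // the function uses R0..R2
	fmt.Println(localIndex(numI, 2, false)) // R2 -> local 2
	fmt.Println(localIndex(numI, 0, true))  // F0 -> local 3
}
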
func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
w := new(bytes.Buffer)
+ numI, numF := countRegisters(s)
+
// Function starts with declaration of locals: numbers and types.
switch s.Name {
+ // memchr and memcmp don't use the normal Go calling convention and need i32 variables.
case "memchr":
writeUleb128(w, 1) // number of sets of locals
writeUleb128(w, 3) // number of locals
@@ -719,11 +750,23 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
writeUleb128(w, 2) // number of locals
w.WriteByte(0x7F) // i32
default:
- writeUleb128(w, 2) // number of sets of locals
- writeUleb128(w, 16) // number of locals
- w.WriteByte(0x7E) // i64
- writeUleb128(w, 16) // number of locals
- w.WriteByte(0x7C) // f64
+ numTypes := 0
+ if numI > 0 {
+ numTypes++
+ }
+ if numF > 0 {
+ numTypes++
+ }
+
+ writeUleb128(w, uint64(numTypes))
+ if numI > 0 {
+ writeUleb128(w, uint64(numI)) // number of locals
+ w.WriteByte(0x7E) // i64
+ }
+ if numF > 0 {
+ writeUleb128(w, uint64(numF)) // number of locals
+ w.WriteByte(0x7C) // f64
+ }
}
for p := s.Func.Text; p != nil; p = p.Link {
@@ -737,9 +780,12 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
case reg >= REG_PC_F && reg <= REG_RUN:
w.WriteByte(0x23) // get_global
writeUleb128(w, uint64(reg-REG_PC_F))
- case reg >= REG_R0 && reg <= REG_F15:
- w.WriteByte(0x20) // get_local
+ case reg >= REG_R0 && reg <= REG_R15:
+ w.WriteByte(0x20) // get_local (i64)
writeUleb128(w, uint64(reg-REG_R0))
+ case reg >= REG_F0 && reg <= REG_F15:
+ w.WriteByte(0x20) // get_local (f64)
+ writeUleb128(w, uint64(numI+(reg-REG_F0)))
default:
panic("bad Get: invalid register")
}
@@ -761,7 +807,11 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
} else {
w.WriteByte(0x21) // set_local
}
- writeUleb128(w, uint64(reg-REG_R0))
+ if reg <= REG_R15 {
+ writeUleb128(w, uint64(reg-REG_R0))
+ } else {
+ writeUleb128(w, uint64(numI+(reg-REG_F0)))
+ }
default:
panic("bad Set: invalid register")
}
@@ -773,9 +823,12 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
}
reg := p.To.Reg
switch {
- case reg >= REG_R0 && reg <= REG_F15:
- w.WriteByte(0x22) // tee_local
+ case reg >= REG_R0 && reg <= REG_R15:
+ w.WriteByte(0x22) // tee_local (i64)
writeUleb128(w, uint64(reg-REG_R0))
+ case reg >= REG_F0 && reg <= REG_F15:
+ w.WriteByte(0x22) // tee_local (f64)
+ writeUleb128(w, uint64(numI+(reg-REG_F0)))
default:
panic("bad Tee: invalid register")
}
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index d3389e4f15..1d056097c3 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2288,7 +2288,7 @@ func instinit(ctxt *obj.Link) {
}
}
-var isAndroid = (objabi.GOOS == "android")
+var isAndroid = objabi.GOOS == "android"
func prefixof(ctxt *obj.Link, a *obj.Addr) int {
if a.Reg < REG_CS && a.Index < REG_CS { // fast path
@@ -5051,7 +5051,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {
bad:
if ctxt.Arch.Family != sys.AMD64 {
// here, the assembly has failed.
- // if its a byte instruction that has
+ // if it's a byte instruction that has
// unaddressable registers, try to
// exchange registers and reissue the
// instruction with the operands renamed.
diff --git a/src/cmd/internal/obj/x86/evex.go b/src/cmd/internal/obj/x86/evex.go
index 30c0e62e0f..d8867283fa 100644
--- a/src/cmd/internal/obj/x86/evex.go
+++ b/src/cmd/internal/obj/x86/evex.go
@@ -194,7 +194,7 @@ func newEVEXSuffix() evexSuffix {
return evexSuffix{rounding: rcUnset}
}
-// evexSuffixMap maps obj.X86suffix to it's decoded version.
+// evexSuffixMap maps obj.X86suffix to its decoded version.
// Filled during init().
var evexSuffixMap [255]evexSuffix
diff --git a/src/cmd/internal/obj/x86/ytab.go b/src/cmd/internal/obj/x86/ytab.go
index 14bbaf72a9..7d0b75bf46 100644
--- a/src/cmd/internal/obj/x86/ytab.go
+++ b/src/cmd/internal/obj/x86/ytab.go
@@ -22,7 +22,7 @@ type ytab struct {
// Returns true if yt is compatible with args.
//
-// Elements from args and yt.args are used to
+// Elements from args and yt.args are used
// to index ycover table like `ycover[args[i]+yt.args[i]]`.
// This means that args should contain values that already
// multiplied by Ymax.
diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go
index ffd1c04d39..d1017322f0 100644
--- a/src/cmd/internal/objabi/util.go
+++ b/src/cmd/internal/objabi/util.go
@@ -105,7 +105,6 @@ var (
Fieldtrack_enabled int
Preemptibleloops_enabled int
Clobberdead_enabled int
- DebugCPU_enabled int
)
// Toolchain experiments.
@@ -120,7 +119,6 @@ var exper = []struct {
{"framepointer", &framepointer_enabled},
{"preemptibleloops", &Preemptibleloops_enabled},
{"clobberdead", &Clobberdead_enabled},
- {"debugcpu", &DebugCPU_enabled},
}
var defaultExpstring = Expstring()
diff --git a/src/cmd/internal/xcoff/file.go b/src/cmd/internal/xcoff/file.go
new file mode 100644
index 0000000000..0923b9fcf3
--- /dev/null
+++ b/src/cmd/internal/xcoff/file.go
@@ -0,0 +1,687 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xcoff implements access to XCOFF (Extended Common Object File Format) files.
+package xcoff
+
+import (
+ "debug/dwarf"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// SectionHeader holds information about an XCOFF section header.
+type SectionHeader struct {
+ Name string
+ VirtualAddress uint64
+ Size uint64
+ Type uint32
+ Relptr uint64
+ Nreloc uint32
+}
+
+type Section struct {
+ SectionHeader
+ Relocs []Reloc
+ io.ReaderAt
+ sr *io.SectionReader
+}
+
+// AuxiliaryCSect holds information about an XCOFF symbol in an AUX_CSECT entry.
+type AuxiliaryCSect struct {
+ Length int64
+ StorageMappingClass int
+ SymbolType int
+}
+
+// AuxiliaryFcn holds information about an XCOFF symbol in an AUX_FCN entry.
+type AuxiliaryFcn struct {
+ Size int64
+}
+
+type Symbol struct {
+ Name string
+ Value uint64
+ SectionNumber int
+ StorageClass int
+ AuxFcn AuxiliaryFcn
+ AuxCSect AuxiliaryCSect
+}
+
+type Reloc struct {
+ VirtualAddress uint64
+ Symbol *Symbol
+ Signed bool
+ InstructionFixed bool
+ Length uint8
+ Type uint8
+}
+
+// ImportedSymbol holds information about an imported XCOFF symbol.
+type ImportedSymbol struct {
+ Name string
+ Library string
+}
+
+// FileHeader holds information about an XCOFF file header.
+type FileHeader struct {
+ TargetMachine uint16
+}
+
+// A File represents an open XCOFF file.
+type File struct {
+ FileHeader
+ Sections []*Section
+ Symbols []*Symbol
+ StringTable []byte
+ LibraryPaths []string
+
+ closer io.Closer
+}
+
+// Open opens the named file using os.Open and prepares it for use as an XCOFF binary.
+func Open(name string) (*File, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ ff, err := NewFile(f)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ ff.closer = f
+ return ff, nil
+}
+
+// Close closes the File.
+// If the File was created using NewFile directly instead of Open,
+// Close has no effect.
+func (f *File) Close() error {
+ var err error
+ if f.closer != nil {
+ err = f.closer.Close()
+ f.closer = nil
+ }
+ return err
+}
+
+// Section returns the first section with the given name, or nil if no such
+// section exists.
+// XCOFF limits section names to 8 bytes. Longer names, like .gosymtab,
+// may be truncated, but this method will still find them.
+func (f *File) Section(name string) *Section {
+ for _, s := range f.Sections {
+ if s.Name == name || (len(name) > 8 && s.Name == name[:8]) {
+ return s
+ }
+ }
+ return nil
+}
+
+// SectionByType returns the first section in f with the
+// given type, or nil if there is no such section.
+func (f *File) SectionByType(typ uint32) *Section {
+ for _, s := range f.Sections {
+ if s.Type == typ {
+ return s
+ }
+ }
+ return nil
+}
+
+// cstring converts ASCII byte sequence b to string.
+// It stops once it finds 0 or reaches end of b.
+func cstring(b []byte) string {
+ var i int
+ for i = 0; i < len(b) && b[i] != 0; i++ {
+ }
+ return string(b[:i])
+}
+
+// getString extracts a string from an XCOFF string table.
+func getString(st []byte, offset uint32) (string, bool) {
+ if offset < 4 || int(offset) >= len(st) {
+ return "", false
+ }
+ return cstring(st[offset:]), true
+}
+
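cstring and getString are small but load-bearing: every name lookup below goes through them. A standalone sketch of cstring's behavior (reimplemented here since it is unexported):

package main

import "fmt"

// cstringSketch mirrors cstring above: stop at the first NUL byte or at
// the end of the slice, whichever comes first.
func cstringSketch(b []byte) string {
	for i, c := range b {
		if c == 0 {
			return string(b[:i])
		}
	}
	return string(b)
}

func main() {
	// An 8-byte XCOFF section name, NUL-padded.
	fmt.Println(cstringSketch([]byte{'.', 't', 'e', 'x', 't', 0, 0, 0})) // .text
}
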
+// NewFile creates a new File for accessing an XCOFF binary in an underlying reader.
+func NewFile(r io.ReaderAt) (*File, error) {
+ sr := io.NewSectionReader(r, 0, 1<<63-1)
+ // Read XCOFF target machine
+ var magic uint16
+ if err := binary.Read(sr, binary.BigEndian, &magic); err != nil {
+ return nil, err
+ }
+ if magic != U802TOCMAGIC && magic != U64_TOCMAGIC {
+ return nil, fmt.Errorf("unrecognised XCOFF magic: 0x%x", magic)
+ }
+
+ f := new(File)
+ f.TargetMachine = magic
+
+ // Read XCOFF file header
+ if _, err := sr.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ var nscns uint16
+ var symptr uint64
+ var nsyms int32
+ var opthdr uint16
+ var hdrsz int
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ fhdr := new(FileHeader32)
+ if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil {
+ return nil, err
+ }
+ nscns = fhdr.Fnscns
+ symptr = uint64(fhdr.Fsymptr)
+ nsyms = fhdr.Fnsyms
+ opthdr = fhdr.Fopthdr
+ hdrsz = FILHSZ_32
+ case U64_TOCMAGIC:
+ fhdr := new(FileHeader64)
+ if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil {
+ return nil, err
+ }
+ nscns = fhdr.Fnscns
+ symptr = fhdr.Fsymptr
+ nsyms = fhdr.Fnsyms
+ opthdr = fhdr.Fopthdr
+ hdrsz = FILHSZ_64
+ }
+
+ if symptr == 0 || nsyms <= 0 {
+ return nil, fmt.Errorf("no symbol table")
+ }
+
+ // Read string table (located right after symbol table).
+ offset := symptr + uint64(nsyms)*SYMESZ
+ if _, err := sr.Seek(int64(offset), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ // The first 4 bytes contain the length (in bytes).
+ var l uint32
+ if err := binary.Read(sr, binary.BigEndian, &l); err != nil {
+ return nil, err
+ }
+ if l > 4 {
+ if _, err := sr.Seek(int64(offset), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ f.StringTable = make([]byte, l)
+ if _, err := io.ReadFull(sr, f.StringTable); err != nil {
+ return nil, err
+ }
+ }
+
+ // Read section headers
+ if _, err := sr.Seek(int64(hdrsz)+int64(opthdr), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ f.Sections = make([]*Section, nscns)
+ for i := 0; i < int(nscns); i++ {
+ var scnptr uint64
+ s := new(Section)
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ shdr := new(SectionHeader32)
+ if err := binary.Read(sr, binary.BigEndian, shdr); err != nil {
+ return nil, err
+ }
+ s.Name = cstring(shdr.Sname[:])
+ s.VirtualAddress = uint64(shdr.Svaddr)
+ s.Size = uint64(shdr.Ssize)
+ scnptr = uint64(shdr.Sscnptr)
+ s.Type = shdr.Sflags
+ s.Relptr = uint64(shdr.Srelptr)
+ s.Nreloc = uint32(shdr.Snreloc)
+ case U64_TOCMAGIC:
+ shdr := new(SectionHeader64)
+ if err := binary.Read(sr, binary.BigEndian, shdr); err != nil {
+ return nil, err
+ }
+ s.Name = cstring(shdr.Sname[:])
+ s.VirtualAddress = shdr.Svaddr
+ s.Size = shdr.Ssize
+ scnptr = shdr.Sscnptr
+ s.Type = shdr.Sflags
+ s.Relptr = shdr.Srelptr
+ s.Nreloc = shdr.Snreloc
+ }
+ r2 := r
+ if scnptr == 0 { // .bss must have all 0s
+ r2 = zeroReaderAt{}
+ }
+ s.sr = io.NewSectionReader(r2, int64(scnptr), int64(s.Size))
+ s.ReaderAt = s.sr
+ f.Sections[i] = s
+ }
+
+ // Symbol map needed by relocation
+ var idxToSym = make(map[int]*Symbol)
+
+ // Read symbol table
+ if _, err := sr.Seek(int64(symptr), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ f.Symbols = make([]*Symbol, 0)
+ for i := 0; i < int(nsyms); i++ {
+ var numaux int
+ var ok, needAuxFcn bool
+ sym := new(Symbol)
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ se := new(SymEnt32)
+ if err := binary.Read(sr, binary.BigEndian, se); err != nil {
+ return nil, err
+ }
+ numaux = int(se.Nnumaux)
+ sym.SectionNumber = int(se.Nscnum)
+ sym.StorageClass = int(se.Nsclass)
+ sym.Value = uint64(se.Nvalue)
+ needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1
+ zeroes := binary.BigEndian.Uint32(se.Nname[:4])
+ if zeroes != 0 {
+ sym.Name = cstring(se.Nname[:])
+ } else {
+ offset := binary.BigEndian.Uint32(se.Nname[4:])
+ sym.Name, ok = getString(f.StringTable, offset)
+ if !ok {
+ goto skip
+ }
+ }
+ case U64_TOCMAGIC:
+ se := new(SymEnt64)
+ if err := binary.Read(sr, binary.BigEndian, se); err != nil {
+ return nil, err
+ }
+ numaux = int(se.Nnumaux)
+ sym.SectionNumber = int(se.Nscnum)
+ sym.StorageClass = int(se.Nsclass)
+ sym.Value = se.Nvalue
+ needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1
+ sym.Name, ok = getString(f.StringTable, se.Noffset)
+ if !ok {
+ goto skip
+ }
+ }
+ if sym.StorageClass != C_EXT && sym.StorageClass != C_WEAKEXT && sym.StorageClass != C_HIDEXT {
+ goto skip
+ }
+ // Must have at least one csect auxiliary entry.
+ if numaux < 1 || i+numaux >= int(nsyms) {
+ goto skip
+ }
+
+ if sym.SectionNumber > int(nscns) {
+ goto skip
+ }
+ if sym.SectionNumber == 0 {
+ sym.Value = 0
+ } else {
+ sym.Value -= f.Sections[sym.SectionNumber-1].VirtualAddress
+ }
+
+ idxToSym[i] = sym
+
+ // If this symbol is a function, its size must be retrieved from
+ // its AUX_FCN entry.
+ // It can happen that a function symbol doesn't have any AUX_FCN;
+ // in that case, needAuxFcn is false and its size will be set to 0.
+ if needAuxFcn {
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ aux := new(AuxFcn32)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxFcn.Size = int64(aux.Xfsize)
+ case U64_TOCMAGIC:
+ aux := new(AuxFcn64)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxFcn.Size = int64(aux.Xfsize)
+ }
+ }
+
+ // Read csect auxiliary entry (by convention, it is the last).
+ if !needAuxFcn {
+ if _, err := sr.Seek(int64(numaux-1)*SYMESZ, os.SEEK_CUR); err != nil {
+ return nil, err
+ }
+ }
+ i += numaux
+ numaux = 0
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ aux := new(AuxCSect32)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7)
+ sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas)
+ sym.AuxCSect.Length = int64(aux.Xscnlen)
+ case U64_TOCMAGIC:
+ aux := new(AuxCSect64)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7)
+ sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas)
+ sym.AuxCSect.Length = int64(aux.Xscnlenhi)<<32 | int64(aux.Xscnlenlo)
+ }
+ f.Symbols = append(f.Symbols, sym)
+ skip:
+ i += numaux // Skip auxiliary entries
+ if _, err := sr.Seek(int64(numaux)*SYMESZ, os.SEEK_CUR); err != nil {
+ return nil, err
+ }
+ }
+
+ // Read relocations
+ // Only for .data or .text section
+ for _, sect := range f.Sections {
+ if sect.Type != STYP_TEXT && sect.Type != STYP_DATA {
+ continue
+ }
+ sect.Relocs = make([]Reloc, sect.Nreloc)
+ if sect.Relptr == 0 {
+ continue
+ }
+ if _, err := sr.Seek(int64(sect.Relptr), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ for i := uint32(0); i < sect.Nreloc; i++ {
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ rel := new(Reloc32)
+ if err := binary.Read(sr, binary.BigEndian, rel); err != nil {
+ return nil, err
+ }
+ sect.Relocs[i].VirtualAddress = uint64(rel.Rvaddr)
+ sect.Relocs[i].Symbol = idxToSym[int(rel.Rsymndx)]
+ sect.Relocs[i].Type = rel.Rtype
+ sect.Relocs[i].Length = rel.Rsize&0x3F + 1
+
+ if rel.Rsize&0x80 != 0 {
+ sect.Relocs[i].Signed = true
+ }
+ if rel.Rsize&0x40 != 0 {
+ sect.Relocs[i].InstructionFixed = true
+ }
+
+ case U64_TOCMAGIC:
+ rel := new(Reloc64)
+ if err := binary.Read(sr, binary.BigEndian, rel); err != nil {
+ return nil, err
+ }
+ sect.Relocs[i].VirtualAddress = rel.Rvaddr
+ sect.Relocs[i].Symbol = idxToSym[int(rel.Rsymndx)]
+ sect.Relocs[i].Type = rel.Rtype
+ sect.Relocs[i].Length = rel.Rsize&0x3F + 1
+ if rel.Rsize&0x80 != 0 {
+ sect.Relocs[i].Signed = true
+ }
+ if rel.Rsize&0x40 != 0 {
+ sect.Relocs[i].InstructionFixed = true
+ }
+ }
+ }
+ }
+
+ return f, nil
+}
+
+// zeroReaderAt is a ReaderAt that reads 0s.
+type zeroReaderAt struct{}
+
+// ReadAt writes len(p) 0s into p.
+func (w zeroReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+ for i := range p {
+ p[i] = 0
+ }
+ return len(p), nil
+}
+
+// Data reads and returns the contents of the XCOFF section s.
+func (s *Section) Data() ([]byte, error) {
+ dat := make([]byte, s.sr.Size())
+ n, err := s.sr.ReadAt(dat, 0)
+ if n == len(dat) {
+ err = nil
+ }
+ return dat[:n], err
+}
+
+// CSect reads and returns the contents of a csect.
+func (f *File) CSect(name string) []byte {
+ for _, sym := range f.Symbols {
+ if sym.Name == name && sym.AuxCSect.SymbolType == XTY_SD {
+ if i := sym.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
+ s := f.Sections[i]
+ if sym.Value+uint64(sym.AuxCSect.Length) <= s.Size {
+ dat := make([]byte, sym.AuxCSect.Length)
+ _, err := s.sr.ReadAt(dat, int64(sym.Value))
+ if err != nil {
+ return nil
+ }
+ return dat
+ }
+ }
+ break
+ }
+ }
+ return nil
+}
+
+func (f *File) DWARF() (*dwarf.Data, error) {
+ // There are many other DWARF sections, but these
+ // are the ones the debug/dwarf package uses.
+ // Don't bother loading others.
+ var subtypes = [...]uint32{SSUBTYP_DWABREV, SSUBTYP_DWINFO, SSUBTYP_DWLINE, SSUBTYP_DWRNGES, SSUBTYP_DWSTR}
+ var dat [len(subtypes)][]byte
+ for i, subtype := range subtypes {
+ s := f.SectionByType(STYP_DWARF | subtype)
+ if s != nil {
+ b, err := s.Data()
+ if err != nil && uint64(len(b)) < s.Size {
+ return nil, err
+ }
+ dat[i] = b
+ }
+ }
+
+ abbrev, info, line, ranges, str := dat[0], dat[1], dat[2], dat[3], dat[4]
+ return dwarf.New(abbrev, nil, nil, info, line, nil, ranges, str)
+}
+
+// readImportIDs returns the import file IDs stored inside the .loader section.
+// The library name pattern is either path/base/member or base/member.
+func (f *File) readImportIDs(s *Section) ([]string, error) {
+ // Read loader header
+ if _, err := s.sr.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ var istlen uint32
+ var nimpid int32
+ var impoff uint64
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ lhdr := new(LoaderHeader32)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ istlen = lhdr.Listlen
+ nimpid = lhdr.Lnimpid
+ impoff = uint64(lhdr.Limpoff)
+ case U64_TOCMAGIC:
+ lhdr := new(LoaderHeader64)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ istlen = lhdr.Listlen
+ nimpid = lhdr.Lnimpid
+ impoff = lhdr.Limpoff
+ }
+
+ // Read loader import file ID table
+ if _, err := s.sr.Seek(int64(impoff), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ table := make([]byte, istlen)
+ if _, err := io.ReadFull(s.sr, table); err != nil {
+ return nil, err
+ }
+
+ offset := 0
+ // First import file ID is the default LIBPATH value
+ libpath := cstring(table[offset:])
+ f.LibraryPaths = strings.Split(libpath, ":")
+ offset += len(libpath) + 3 // 3 null bytes
+ all := make([]string, 0)
+ for i := 1; i < int(nimpid); i++ {
+ impidpath := cstring(table[offset:])
+ offset += len(impidpath) + 1
+ impidbase := cstring(table[offset:])
+ offset += len(impidbase) + 1
+ impidmem := cstring(table[offset:])
+ offset += len(impidmem) + 1
+ var path string
+ if len(impidpath) > 0 {
+ path = impidpath + "/" + impidbase + "/" + impidmem
+ } else {
+ path = impidbase + "/" + impidmem
+ }
+ all = append(all, path)
+ }
+
+ return all, nil
+}
+
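The table readImportIDs walks has a simple shape: a default LIBPATH entry terminated by three NULs, then one NUL-separated (path, base, member) triple per import. A sketch over made-up data:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Fabricated table in the layout parsed above.
	table := "/usr/lib:/lib\x00\x00\x00/usr/lib\x00libc.a\x00shr_64.o\x00"
	libpath := table[:strings.IndexByte(table, 0)]
	rest := strings.TrimSuffix(table[len(libpath)+3:], "\x00")
	fmt.Println(strings.Split(libpath, ":")) // [/usr/lib /lib]
	fmt.Println(strings.Split(rest, "\x00")) // [/usr/lib libc.a shr_64.o]
}
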
+// ImportedSymbols returns the names of all symbols
+// referred to by the binary f that are expected to be
+// satisfied by other libraries at dynamic load time.
+// It does not return weak symbols.
+func (f *File) ImportedSymbols() ([]ImportedSymbol, error) {
+ s := f.SectionByType(STYP_LOADER)
+ if s == nil {
+ return nil, nil
+ }
+ // Read loader header
+ if _, err := s.sr.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ var stlen uint32
+ var stoff uint64
+ var nsyms int32
+ var symoff uint64
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ lhdr := new(LoaderHeader32)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ stlen = lhdr.Lstlen
+ stoff = uint64(lhdr.Lstoff)
+ nsyms = lhdr.Lnsyms
+ symoff = LDHDRSZ_32
+ case U64_TOCMAGIC:
+ lhdr := new(LoaderHeader64)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ stlen = lhdr.Lstlen
+ stoff = lhdr.Lstoff
+ nsyms = lhdr.Lnsyms
+ symoff = lhdr.Lsymoff
+ }
+
+ // Read loader section string table
+ if _, err := s.sr.Seek(int64(stoff), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ st := make([]byte, stlen)
+ if _, err := io.ReadFull(s.sr, st); err != nil {
+ return nil, err
+ }
+
+ // Read imported libraries
+ libs, err := f.readImportIDs(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // Read loader symbol table
+ if _, err := s.sr.Seek(int64(symoff), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ all := make([]ImportedSymbol, 0)
+ for i := 0; i < int(nsyms); i++ {
+ var name string
+ var ifile int32
+ var ok bool
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ ldsym := new(LoaderSymbol32)
+ if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil {
+ return nil, err
+ }
+ if ldsym.Lsmtype&0x40 == 0 {
+ continue // Imported symbols only
+ }
+ zeroes := binary.BigEndian.Uint32(ldsym.Lname[:4])
+ if zeroes != 0 {
+ name = cstring(ldsym.Lname[:])
+ } else {
+ offset := binary.BigEndian.Uint32(ldsym.Lname[4:])
+ name, ok = getString(st, offset)
+ if !ok {
+ continue
+ }
+ }
+ ifile = ldsym.Lifile
+ case U64_TOCMAGIC:
+ ldsym := new(LoaderSymbol64)
+ if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil {
+ return nil, err
+ }
+ if ldsym.Lsmtype&0x40 == 0 {
+ continue // Imported symbols only
+ }
+ name, ok = getString(st, ldsym.Loffset)
+ if !ok {
+ continue
+ }
+ ifile = ldsym.Lifile
+ }
+ var sym ImportedSymbol
+ sym.Name = name
+ if ifile >= 1 && int(ifile) <= len(libs) {
+ sym.Library = libs[ifile-1]
+ }
+ all = append(all, sym)
+ }
+
+ return all, nil
+}
+
+// ImportedLibraries returns the names of all libraries
+// referred to by the binary f that are expected to be
+// linked with the binary at dynamic link time.
+func (f *File) ImportedLibraries() ([]string, error) {
+ s := f.SectionByType(STYP_LOADER)
+ if s == nil {
+ return nil, nil
+ }
+ all, err := f.readImportIDs(s)
+ return all, err
+}
diff --git a/src/cmd/internal/xcoff/file_test.go b/src/cmd/internal/xcoff/file_test.go
new file mode 100644
index 0000000000..a6722e9453
--- /dev/null
+++ b/src/cmd/internal/xcoff/file_test.go
@@ -0,0 +1,102 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xcoff
+
+import (
+ "reflect"
+ "testing"
+)
+
+type fileTest struct {
+ file string
+ hdr FileHeader
+ sections []*SectionHeader
+ needed []string
+}
+
+var fileTests = []fileTest{
+ {
+ "testdata/gcc-ppc32-aix-dwarf2-exec",
+ FileHeader{U802TOCMAGIC},
+ []*SectionHeader{
+ {".text", 0x10000290, 0x00000bbd, STYP_TEXT, 0x7ae6, 0x36},
+ {".data", 0x20000e4d, 0x00000437, STYP_DATA, 0x7d02, 0x2b},
+ {".bss", 0x20001284, 0x0000021c, STYP_BSS, 0, 0},
+ {".loader", 0x00000000, 0x000004b3, STYP_LOADER, 0, 0},
+ {".dwline", 0x00000000, 0x000000df, STYP_DWARF | SSUBTYP_DWLINE, 0x7eb0, 0x7},
+ {".dwinfo", 0x00000000, 0x00000314, STYP_DWARF | SSUBTYP_DWINFO, 0x7ef6, 0xa},
+ {".dwabrev", 0x00000000, 0x000000d6, STYP_DWARF | SSUBTYP_DWABREV, 0, 0},
+ {".dwarnge", 0x00000000, 0x00000020, STYP_DWARF | SSUBTYP_DWARNGE, 0x7f5a, 0x2},
+ {".dwloc", 0x00000000, 0x00000074, STYP_DWARF | SSUBTYP_DWLOC, 0, 0},
+ {".debug", 0x00000000, 0x00005e4f, STYP_DEBUG, 0, 0},
+ },
+ []string{"libc.a/shr.o"},
+ },
+ {
+ "testdata/gcc-ppc64-aix-dwarf2-exec",
+ FileHeader{U64_TOCMAGIC},
+ []*SectionHeader{
+ {".text", 0x10000480, 0x00000afd, STYP_TEXT, 0x8322, 0x34},
+ {".data", 0x20000f7d, 0x000002f3, STYP_DATA, 0x85fa, 0x25},
+ {".bss", 0x20001270, 0x00000428, STYP_BSS, 0, 0},
+ {".loader", 0x00000000, 0x00000535, STYP_LOADER, 0, 0},
+ {".dwline", 0x00000000, 0x000000b4, STYP_DWARF | SSUBTYP_DWLINE, 0x8800, 0x4},
+ {".dwinfo", 0x00000000, 0x0000036a, STYP_DWARF | SSUBTYP_DWINFO, 0x8838, 0x7},
+ {".dwabrev", 0x00000000, 0x000000b5, STYP_DWARF | SSUBTYP_DWABREV, 0, 0},
+ {".dwarnge", 0x00000000, 0x00000040, STYP_DWARF | SSUBTYP_DWARNGE, 0x889a, 0x2},
+ {".dwloc", 0x00000000, 0x00000062, STYP_DWARF | SSUBTYP_DWLOC, 0, 0},
+ {".debug", 0x00000000, 0x00006605, STYP_DEBUG, 0, 0},
+ },
+ []string{"libc.a/shr_64.o"},
+ },
+}
+
+func TestOpen(t *testing.T) {
+ for i := range fileTests {
+ tt := &fileTests[i]
+
+ f, err := Open(tt.file)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !reflect.DeepEqual(f.FileHeader, tt.hdr) {
+ t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.FileHeader, tt.hdr)
+ continue
+ }
+
+ for i, sh := range f.Sections {
+ if i >= len(tt.sections) {
+ break
+ }
+ have := &sh.SectionHeader
+ want := tt.sections[i]
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("open %s, section %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want)
+ }
+ }
+ tn := len(tt.sections)
+ fn := len(f.Sections)
+ if tn != fn {
+ t.Errorf("open %s: len(Sections) = %d, want %d", tt.file, fn, tn)
+ }
+ tl := tt.needed
+ fl, err := f.ImportedLibraries()
+ if err != nil {
+ t.Error(err)
+ }
+ if !reflect.DeepEqual(tl, fl) {
+ t.Errorf("open %s: loader import = %v, want %v", tt.file, tl, fl)
+ }
+ }
+}
+
+func TestOpenFailure(t *testing.T) {
+ filename := "file.go" // not an XCOFF object file
+ _, err := Open(filename) // don't crash
+ if err == nil {
+ t.Errorf("open %s: succeeded unexpectedly", filename)
+ }
+}
diff --git a/src/cmd/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec b/src/cmd/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec
new file mode 100644
index 0000000000..810e21a0df
Binary files /dev/null and b/src/cmd/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec differ
diff --git a/src/cmd/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec b/src/cmd/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec
new file mode 100644
index 0000000000..707d01ebd4
Binary files /dev/null and b/src/cmd/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec differ
diff --git a/src/cmd/internal/xcoff/testdata/hello.c b/src/cmd/internal/xcoff/testdata/hello.c
new file mode 100644
index 0000000000..34d9ee7923
--- /dev/null
+++ b/src/cmd/internal/xcoff/testdata/hello.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+void
+main(int argc, char *argv[])
+{
+ printf("hello, world\n");
+}
diff --git a/src/cmd/internal/xcoff/xcoff.go b/src/cmd/internal/xcoff/xcoff.go
new file mode 100644
index 0000000000..f8465d7289
--- /dev/null
+++ b/src/cmd/internal/xcoff/xcoff.go
@@ -0,0 +1,367 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xcoff
+
+// File Header.
+type FileHeader32 struct {
+ Fmagic uint16 // Target machine
+ Fnscns uint16 // Number of sections
+ Ftimedat int32 // Time and date of file creation
+ Fsymptr uint32 // Byte offset to symbol table start
+ Fnsyms int32 // Number of entries in symbol table
+ Fopthdr uint16 // Number of bytes in optional header
+ Fflags uint16 // Flags
+}
+
+type FileHeader64 struct {
+ Fmagic uint16 // Target machine
+ Fnscns uint16 // Number of sections
+ Ftimedat int32 // Time and date of file creation
+ Fsymptr uint64 // Byte offset to symbol table start
+ Fopthdr uint16 // Number of bytes in optional header
+ Fflags uint16 // Flags
+ Fnsyms int32 // Number of entries in symbol table
+}
+
+const (
+ FILHSZ_32 = 20
+ FILHSZ_64 = 24
+)
+const (
+ U802TOCMAGIC = 0737 // AIX 32-bit XCOFF
+ U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF
+)
+
+// Flags that describe the type of the object file.
+const (
+ F_RELFLG = 0x0001
+ F_EXEC = 0x0002
+ F_LNNO = 0x0004
+ F_FDPR_PROF = 0x0010
+ F_FDPR_OPTI = 0x0020
+ F_DSA = 0x0040
+ F_VARPG = 0x0100
+ F_DYNLOAD = 0x1000
+ F_SHROBJ = 0x2000
+ F_LOADONLY = 0x4000
+)
+
+// Section Header.
+type SectionHeader32 struct {
+ Sname [8]byte // Section name
+ Spaddr uint32 // Physical address
+ Svaddr uint32 // Virtual address
+ Ssize uint32 // Section size
+ Sscnptr uint32 // Offset in file to raw data for section
+ Srelptr uint32 // Offset in file to relocation entries for section
+ Slnnoptr uint32 // Offset in file to line number entries for section
+ Snreloc uint16 // Number of relocation entries
+ Snlnno uint16 // Number of line number entries
+ Sflags uint32 // Flags to define the section type
+}
+
+type SectionHeader64 struct {
+ Sname [8]byte // Section name
+ Spaddr uint64 // Physical address
+ Svaddr uint64 // Virtual address
+ Ssize uint64 // Section size
+ Sscnptr uint64 // Offset in file to raw data for section
+ Srelptr uint64 // Offset in file to relocation entries for section
+ Slnnoptr uint64 // Offset in file to line number entries for section
+ Snreloc uint32 // Number of relocation entries
+ Snlnno uint32 // Number of line number entries
+ Sflags uint32 // Flags to define the section type
+ Spad uint32 // Needs to be 72 bytes long
+}
+
+// Flags defining the section type.
+const (
+ STYP_DWARF = 0x0010
+ STYP_TEXT = 0x0020
+ STYP_DATA = 0x0040
+ STYP_BSS = 0x0080
+ STYP_EXCEPT = 0x0100
+ STYP_INFO = 0x0200
+ STYP_TDATA = 0x0400
+ STYP_TBSS = 0x0800
+ STYP_LOADER = 0x1000
+ STYP_DEBUG = 0x2000
+ STYP_TYPCHK = 0x4000
+ STYP_OVRFLO = 0x8000
+)
+const (
+ SSUBTYP_DWINFO = 0x10000 // DWARF info section
+ SSUBTYP_DWLINE = 0x20000 // DWARF line-number section
+ SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section
+ SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section
+ SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section
+ SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section
+ SSUBTYP_DWSTR = 0x70000 // DWARF strings section
+ SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section
+ SSUBTYP_DWLOC = 0x90000 // DWARF location lists section
+ SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section
+ SSUBTYP_DWMAC = 0xB0000 // DWARF macros section
+)
+
+// Symbol Table Entry.
+type SymEnt32 struct {
+ Nname [8]byte // Symbol name
+ Nvalue uint32 // Symbol value
+ Nscnum int16 // Section number of symbol
+ Ntype uint16 // Basic and derived type specification
+ Nsclass int8 // Storage class of symbol
+ Nnumaux int8 // Number of auxiliary entries
+}
+
+type SymEnt64 struct {
+ Nvalue uint64 // Symbol value
+ Noffset uint32 // Offset of the name in string table or .debug section
+ Nscnum int16 // Section number of symbol
+ Ntype uint16 // Basic and derived type specification
+ Nsclass int8 // Storage class of symbol
+ Nnumaux int8 // Number of auxiliary entries
+}
+
+const SYMESZ = 18
+
+const (
+ // Nscnum
+ N_DEBUG = -2
+ N_ABS = -1
+ N_UNDEF = 0
+
+ // Ntype
+ SYM_V_INTERNAL = 0x1000
+ SYM_V_HIDDEN = 0x2000
+ SYM_V_PROTECTED = 0x3000
+ SYM_V_EXPORTED = 0x4000
+ SYM_TYPE_FUNC = 0x0020 // is function
+)
+
+// Storage Class.
+const (
+ C_NULL = 0 // Symbol table entry marked for deletion
+ C_EXT = 2 // External symbol
+ C_STAT = 3 // Static symbol
+ C_BLOCK = 100 // Beginning or end of inner block
+ C_FCN = 101 // Beginning or end of function
+ C_FILE = 103 // Source file name and compiler information
+ C_HIDEXT = 107 // Unnamed external symbol
+ C_BINCL = 108 // Beginning of include file
+ C_EINCL = 109 // End of include file
+ C_WEAKEXT = 111 // Weak external symbol
+ C_DWARF = 112 // DWARF symbol
+ C_GSYM = 128 // Global variable
+ C_LSYM = 129 // Automatic variable allocated on stack
+ C_PSYM = 130 // Argument to subroutine allocated on stack
+ C_RSYM = 131 // Register variable
+ C_RPSYM = 132 // Argument to function or procedure stored in register
+ C_STSYM = 133 // Statically allocated symbol
+ C_BCOMM = 135 // Beginning of common block
+ C_ECOML = 136 // Local member of common block
+ C_ECOMM = 137 // End of common block
+ C_DECL = 140 // Declaration of object
+ C_ENTRY = 141 // Alternate entry
+ C_FUN = 142 // Function or procedure
+ C_BSTAT = 143 // Beginning of static block
+ C_ESTAT = 144 // End of static block
+ C_GTLS = 145 // Global thread-local variable
+ C_STTLS = 146 // Static thread-local variable
+)
+
+// File Auxiliary Entry
+type AuxFile64 struct {
+ Xfname [8]byte // Name or offset inside string table
+ Xftype uint8 // Source file string type
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+// Function Auxiliary Entry
+type AuxFcn32 struct {
+ Xexptr uint32 // File offset to exception table entry
+ Xfsize uint32 // Size of function in bytes
+ Xlnnoptr uint32 // File pointer to line number
+ Xendndx uint32 // Symbol table index of next entry
+ Xpad uint16 // Unused
+}
+type AuxFcn64 struct {
+ Xlnnoptr uint64 // File pointer to line number
+ Xfsize uint32 // Size of function in bytes
+ Xendndx uint32 // Symbol table index of next entry
+ Xpad uint8 // Unused
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+type AuxSect64 struct {
+ Xscnlen uint64 // section length
+ Xnreloc uint64 // Num RLDs
+ pad uint8
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+// csect Auxiliary Entry.
+type AuxCSect32 struct {
+ Xscnlen int32 // Length or symbol table index
+ Xparmhash uint32 // Offset of parameter type-check string
+ Xsnhash uint16 // .typchk section number
+ Xsmtyp uint8 // Symbol alignment and type
+ Xsmclas uint8 // Storage-mapping class
+ Xstab uint32 // Reserved
+ Xsnstab uint16 // Reserved
+}
+
+type AuxCSect64 struct {
+ Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index
+ Xparmhash uint32 // Offset of parameter type-check string
+ Xsnhash uint16 // .typchk section number
+ Xsmtyp uint8 // Symbol alignment and type
+ Xsmclas uint8 // Storage-mapping class
+ Xscnlenhi int32 // Upper 4 bytes of length or symbol table index
+ Xpad uint8 // Unused
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+// Auxiliary type
+const (
+ _AUX_EXCEPT = 255
+ _AUX_FCN = 254
+ _AUX_SYM = 253
+ _AUX_FILE = 252
+ _AUX_CSECT = 251
+ _AUX_SECT = 250
+)
+
+// Symbol type field.
+const (
+ XTY_ER = 0 // External reference
+ XTY_SD = 1 // Section definition
+ XTY_LD = 2 // Label definition
+ XTY_CM = 3 // Common csect definition
+)
+
+// Defines for File auxiliary definitions: x_ftype field of x_file
+const (
+ XFT_FN = 0 // Source File Name
+ XFT_CT = 1 // Compile Time Stamp
+ XFT_CV = 2 // Compiler Version Number
+ XFT_CD = 128 // Compiler Defined Information
+)
+
+// Storage-mapping class.
+const (
+ XMC_PR = 0 // Program code
+ XMC_RO = 1 // Read-only constant
+ XMC_DB = 2 // Debug dictionary table
+ XMC_TC = 3 // TOC entry
+ XMC_UA = 4 // Unclassified
+ XMC_RW = 5 // Read/Write data
+ XMC_GL = 6 // Global linkage
+ XMC_XO = 7 // Extended operation
+ XMC_SV = 8 // 32-bit supervisor call descriptor
+ XMC_BS = 9 // BSS class
+ XMC_DS = 10 // Function descriptor
+ XMC_UC = 11 // Unnamed FORTRAN common
+ XMC_TC0 = 15 // TOC anchor
+ XMC_TD = 16 // Scalar data entry in the TOC
+ XMC_SV64 = 17 // 64-bit supervisor call descriptor
+ XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit
+ XMC_TL = 20 // Read/Write thread-local data
+ XMC_UL = 21 // Read/Write thread-local data (.tbss)
+ XMC_TE = 22 // TOC entry
+)
+
+// Loader Header.
+type LoaderHeader32 struct {
+ Lversion int32 // Loader section version number
+ Lnsyms int32 // Number of symbol table entries
+ Lnreloc int32 // Number of relocation table entries
+ Listlen uint32 // Length of import file ID string table
+ Lnimpid int32 // Number of import file IDs
+ Limpoff uint32 // Offset to start of import file IDs
+ Lstlen uint32 // Length of string table
+ Lstoff uint32 // Offset to start of string table
+}
+
+type LoaderHeader64 struct {
+ Lversion int32 // Loader section version number
+ Lnsyms int32 // Number of symbol table entries
+ Lnreloc int32 // Number of relocation table entries
+ Listlen uint32 // Length of import file ID string table
+ Lnimpid int32 // Number of import file IDs
+ Lstlen uint32 // Length of string table
+ Limpoff uint64 // Offset to start of import file IDs
+ Lstoff uint64 // Offset to start of string table
+ Lsymoff uint64 // Offset to start of symbol table
+ Lrldoff uint64 // Offset to start of relocation entries
+}
+
+const (
+ LDHDRSZ_32 = 32
+ LDHDRSZ_64 = 56
+)
+
+// Loader Symbol.
+type LoaderSymbol32 struct {
+ Lname [8]byte // Symbol name or byte offset into string table
+ Lvalue uint32 // Address field
+ Lscnum int16 // Section number containing symbol
+ Lsmtype int8 // Symbol type, export, import flags
+ Lsmclas int8 // Symbol storage class
+ Lifile int32 // Import file ID; ordinal of import file IDs
+ Lparm uint32 // Parameter type-check field
+}
+
+type LoaderSymbol64 struct {
+ Lvalue uint64 // Address field
+ Loffset uint32 // Byte offset into string table of symbol name
+ Lscnum int16 // Section number containing symbol
+ Lsmtype int8 // Symbol type, export, import flags
+ Lsmclas int8 // Symbol storage class
+ Lifile int32 // Import file ID; ordinal of import file IDs
+ Lparm uint32 // Parameter type-check field
+}
+
+type Reloc32 struct {
+ Rvaddr uint32 // (virtual) address of reference
+ Rsymndx uint32 // Index into symbol table
+ Rsize uint8 // Sign and reloc bit len
+ Rtype uint8 // Toc relocation type
+}
+
+type Reloc64 struct {
+ Rvaddr uint64 // (virtual) address of reference
+ Rsymndx uint32 // Index into symbol table
+ Rsize uint8 // Sign and reloc bit len
+ Rtype uint8 // Toc relocation type
+}
+
+const (
+ R_POS = 0x00 // A(sym) Positive Relocation
+ R_NEG = 0x01 // -A(sym) Negative Relocation
+ R_REL = 0x02 // A(sym-*) Relative to self
+ R_TOC = 0x03 // A(sym-TOC) Relative to TOC
+ R_TRL = 0x12 // A(sym-TOC) TOC Relative indirect load.
+
+ R_TRLA = 0x13 // A(sym-TOC) TOC Rel load address. modifiable inst
+ R_GL = 0x05 // A(external TOC of sym) Global Linkage
+ R_TCL = 0x06 // A(local TOC of sym) Local object TOC address
+ R_RL = 0x0C // A(sym) Pos indirect load. modifiable instruction
+ R_RLA = 0x0D // A(sym) Pos Load Address. modifiable instruction
+ R_REF = 0x0F // AL0(sym) Non relocating ref. No garbage collect
+ R_BA = 0x08 // A(sym) Branch absolute. Cannot modify instruction
+ R_RBA = 0x18 // A(sym) Branch absolute. modifiable instruction
+ R_BR = 0x0A // A(sym-*) Branch rel to self. non modifiable
+ R_RBR = 0x1A // A(sym-*) Branch rel to self. modifiable instr
+
+ R_TLS = 0x20 // General-dynamic reference to TLS symbol
+ R_TLS_IE = 0x21 // Initial-exec reference to TLS symbol
+ R_TLS_LD = 0x22 // Local-dynamic reference to TLS symbol
+ R_TLS_LE = 0x23 // Local-exec reference to TLS symbol
+ R_TLSM = 0x24 // Module reference to TLS symbol
+ R_TLSML = 0x25 // Module reference to local (own) module
+
+ R_TOCU = 0x30 // Relative to TOC - high order bits
+ R_TOCL = 0x31 // Relative to TOC - low order bits
+)
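The loader header, symbol, and relocation structures above are fixed-size and byte-packed, so they can be decoded directly with encoding/binary. A minimal sketch, assuming a big-endian XCOFF64 file and a reader positioned at the start of the loader section (package name and readLoaderHeader64 are illustrative helpers, not part of this patch):

	package loader

	import (
		"encoding/binary"
		"io"
	)

	// LoaderHeader64 mirrors the 56-byte (LDHDRSZ_64) header added above.
	type LoaderHeader64 struct {
		Lversion int32
		Lnsyms   int32
		Lnreloc  int32
		Listlen  uint32
		Lnimpid  int32
		Lstlen   uint32
		Limpoff  uint64
		Lstoff   uint64
		Lsymoff  uint64
		Lrldoff  uint64
	}

	// readLoaderHeader64 decodes the header; XCOFF on AIX/POWER is big-endian.
	func readLoaderHeader64(r io.Reader) (*LoaderHeader64, error) {
		var h LoaderHeader64
		if err := binary.Read(r, binary.BigEndian, &h); err != nil {
			return nil, err
		}
		return &h, nil
	}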
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index ee98aef20d..3cc9e294d2 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -529,11 +529,7 @@ func (ctxt *Link) reloc() {
}
}
-func windynrelocsym(ctxt *Link, s *sym.Symbol) {
- rel := ctxt.Syms.Lookup(".rel", 0)
- if s == rel {
- return
- }
+func windynrelocsym(ctxt *Link, rel, s *sym.Symbol) {
for ri := range s.R {
r := &s.R[ri]
targ := r.Sym
@@ -576,14 +572,31 @@ func windynrelocsym(ctxt *Link, s *sym.Symbol) {
}
}
-func dynrelocsym(ctxt *Link, s *sym.Symbol) {
- if ctxt.HeadType == objabi.Hwindows {
- if ctxt.LinkMode == LinkInternal {
- windynrelocsym(ctxt, s)
- }
+// windynrelocsyms generates a jump table for C library functions that will be
+// added later. windynrelocsyms writes the table into the .rel symbol.
+func (ctxt *Link) windynrelocsyms() {
+ if !(ctxt.HeadType == objabi.Hwindows && iscgo && ctxt.LinkMode == LinkInternal) {
return
}
+ if ctxt.Debugvlog != 0 {
+ ctxt.Logf("%5.2f windynrelocsyms\n", Cputime())
+ }
+ /* relocation table */
+ rel := ctxt.Syms.Lookup(".rel", 0)
+ rel.Attr |= sym.AttrReachable
+ rel.Type = sym.STEXT
+ ctxt.Textp = append(ctxt.Textp, rel)
+
+ for _, s := range ctxt.Textp {
+ if s == rel {
+ continue
+ }
+ windynrelocsym(ctxt, rel, s)
+ }
+}
+
+func dynrelocsym(ctxt *Link, s *sym.Symbol) {
for ri := range s.R {
r := &s.R[ri]
if ctxt.BuildMode == BuildModePIE && ctxt.LinkMode == LinkInternal {
@@ -605,9 +618,12 @@ func dynrelocsym(ctxt *Link, s *sym.Symbol) {
}
func dynreloc(ctxt *Link, data *[sym.SXREF][]*sym.Symbol) {
+ if ctxt.HeadType == objabi.Hwindows {
+ return
+ }
// -d suppresses dynamic loader format, so we may as well not
// compute these sections or mark their symbols as reachable.
- if *FlagD && ctxt.HeadType != objabi.Hwindows {
+ if *FlagD {
return
}
if ctxt.Debugvlog != 0 {
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 743f4cedd4..827a1d931a 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -863,7 +863,9 @@ func defdwsymb(ctxt *Link, s *sym.Symbol, str string, t SymbolType, v int64, got
default:
return
}
-
+ if ctxt.LinkMode != LinkExternal && isStaticTemp(s.Name) {
+ return
+ }
dwarfDefineGlobal(ctxt, s, str, v, gotype)
case AutoSym, ParamSym, DeletedAutoSym:
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index 5d2aadf589..42b598efef 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -526,7 +526,7 @@ func (ex *examiner) entryFromOffset(off dwarf.Offset) *dwarf.Entry {
return nil
}
-// Return the ID that that examiner uses to refer to the DIE at offset off
+// Return the ID that the examiner uses to refer to the DIE at offset off.
func (ex *examiner) idxFromOffset(off dwarf.Offset) int {
if idx, found := ex.idxByOffset[off]; found {
return idx
@@ -1062,3 +1062,76 @@ func main() {
}
}
}
+
+func TestStaticTmp(t *testing.T) {
+ // Checks that statictmp variables do not appear in debug_info or the
+ // symbol table.
+ // Also checks that statictmp variables do not collide with user-defined
+ // variables (issue #25113).
+
+ testenv.MustHaveGoBuild(t)
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ const prog = `package main
+
+var stmp_0 string
+var a []int
+
+func init() {
+ a = []int{ 7 }
+}
+
+func main() {
+ println(a[0])
+}
+`
+
+ f := gobuild(t, dir, prog, NoOpt)
+
+ defer f.Close()
+
+ d, err := f.DWARF()
+ if err != nil {
+ t.Fatalf("error reading DWARF: %v", err)
+ }
+
+ rdr := d.Reader()
+ for {
+ e, err := rdr.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if e == nil {
+ break
+ }
+ if e.Tag != dwarf.TagVariable {
+ continue
+ }
+ name, ok := e.Val(dwarf.AttrName).(string)
+ if !ok {
+ continue
+ }
+ if strings.Contains(name, "stmp") {
+ t.Errorf("statictmp variable found in debug_info: %s at %x", name, e.Offset)
+ }
+ }
+
+ syms, err := f.Symbols()
+ if err != nil {
+ t.Fatalf("error reading symbols: %v", err)
+ }
+ for _, sym := range syms {
+ if strings.Contains(sym.Name, "stmp") {
+ t.Errorf("statictmp variable found in symbol table: %s", sym.Name)
+ }
+ }
+}
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go
index f61a290e42..c2a2b3a7ba 100644
--- a/src/cmd/link/internal/ld/elf.go
+++ b/src/cmd/link/internal/ld/elf.go
@@ -506,7 +506,7 @@ func Elfinit(ctxt *Link) {
}
elf64 = true
- ehdr.phoff = ELF64HDRSIZE /* Must be be ELF64HDRSIZE: first PHdr must follow ELF header */
+ ehdr.phoff = ELF64HDRSIZE /* Must be ELF64HDRSIZE: first PHdr must follow ELF header */
ehdr.shoff = ELF64HDRSIZE /* Will move as we add PHeaders */
ehdr.ehsize = ELF64HDRSIZE /* Must be ELF64HDRSIZE */
ehdr.phentsize = ELF64PHDRSIZE /* Must be ELF64PHDRSIZE */
@@ -533,7 +533,7 @@ func Elfinit(ctxt *Link) {
fallthrough
default:
ehdr.phoff = ELF32HDRSIZE
- /* Must be be ELF32HDRSIZE: first PHdr must follow ELF header */
+ /* Must be ELF32HDRSIZE: first PHdr must follow ELF header */
ehdr.shoff = ELF32HDRSIZE /* Will move as we add PHeaders */
ehdr.ehsize = ELF32HDRSIZE /* Must be ELF32HDRSIZE */
ehdr.phentsize = ELF32PHDRSIZE /* Must be ELF32PHDRSIZE */
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index e911d7bf08..c304b858df 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -646,7 +646,7 @@ func (ctxt *Link) loadlib() {
//
// These are the symbols that begin with the prefix 'type.' and
// contain run-time type information used by the runtime and reflect
-// packages. All Go binaries contain these symbols, but only only
+// packages. All Go binaries contain these symbols, but only
// those programs loaded dynamically in multiple parts need these
// symbols to have entries in the symbol table.
func (ctxt *Link) mangleTypeSym() {
@@ -1566,7 +1566,7 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string,
//
// Note: It's possible for "\n!\n" to appear within the binary
// package export data format. To avoid truncating the package
- // definition prematurely (issue 21703), we keep keep track of
+ // definition prematurely (issue 21703), we keep track of
// how many "$$" delimiters we've seen.
import0 := f.Offset()
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index 905380a1db..2c5152f2e3 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -222,6 +222,7 @@ func Main(arch *sys.Arch, theArch Arch) {
ctxt.dostkcheck()
if ctxt.HeadType == objabi.Hwindows {
ctxt.dope()
+ ctxt.windynrelocsyms()
}
ctxt.addexport()
thearch.Gentext(ctxt) // trampolines, call stubs, etc.
diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go
index 24398fcc87..3eb3d05882 100644
--- a/src/cmd/link/internal/ld/pcln.go
+++ b/src/cmd/link/internal/ld/pcln.go
@@ -7,6 +7,7 @@ package ld
import (
"cmd/internal/objabi"
"cmd/internal/src"
+ "cmd/internal/sys"
"cmd/link/internal/sym"
"log"
"os"
@@ -314,13 +315,26 @@ func (ctxt *Link) pclntab() {
// deferreturn
deferreturn := uint32(0)
+ lastWasmAddr := uint32(0)
for _, r := range s.R {
+ if ctxt.Arch.Family == sys.Wasm && r.Type == objabi.R_ADDR {
+ // Wasm does not have a live variable set at the deferreturn
+ // call itself. Instead it has one identified by the
+ // resumption point immediately preceding the deferreturn.
+ // The wasm code has a R_ADDR relocation which is used to
+ // set the resumption point to PC_B.
+ lastWasmAddr = uint32(r.Add)
+ }
if r.Sym != nil && r.Sym.Name == "runtime.deferreturn" && r.Add == 0 {
- // Note: the relocation target is in the call instruction, but
- // is not necessarily the whole instruction (for instance, on
- // x86 the relocation applies to bytes [1:5] of the 5 byte call
- // instruction).
- deferreturn = uint32(r.Off)
+ if ctxt.Arch.Family == sys.Wasm {
+ deferreturn = lastWasmAddr
+ } else {
+ // Note: the relocation target is in the call instruction, but
+ // is not necessarily the whole instruction (for instance, on
+ // x86 the relocation applies to bytes [1:5] of the 5 byte call
+ // instruction).
+ deferreturn = uint32(r.Off)
+ }
break // only need one
}
}
diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go
index db269c78e5..cf197f50b0 100644
--- a/src/cmd/link/internal/ld/pe.go
+++ b/src/cmd/link/internal/ld/pe.go
@@ -1461,12 +1461,6 @@ func addPEBaseReloc(ctxt *Link) {
}
func (ctxt *Link) dope() {
- /* relocation table */
- rel := ctxt.Syms.Lookup(".rel", 0)
-
- rel.Attr |= sym.AttrReachable
- rel.Type = sym.SELFROSECT
-
initdynimport(ctxt)
initdynexport(ctxt)
}
@@ -1534,9 +1528,6 @@ func Asmbpe(ctxt *Link) {
// some data symbols (e.g. masks) end up in the .rdata section, and they normally
// expect larger alignment requirement than the default text section alignment.
ro.characteristics |= IMAGE_SCN_ALIGN_32BYTES
- } else {
- // TODO(brainman): should not need IMAGE_SCN_MEM_EXECUTE, but I do not know why it carshes without it
- ro.characteristics |= IMAGE_SCN_MEM_EXECUTE
}
ro.checkSegment(&Segrodata)
pefile.rdataSect = ro
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index 2a04ef3824..d2737deca5 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -432,6 +432,10 @@ func (ctxt *Link) symtab() {
// just defined above will be first.
// hide the specific symbols.
for _, s := range ctxt.Syms.Allsym {
+ if ctxt.LinkMode != LinkExternal && isStaticTemp(s.Name) {
+ s.Attr |= sym.AttrNotInSymbolTable
+ }
+
if !s.Attr.Reachable() || s.Attr.Special() || s.Type != sym.SRODATA {
continue
}
@@ -676,3 +680,10 @@ func (ctxt *Link) symtab() {
lastmoduledatap.AddAddr(ctxt.Arch, moduledata)
}
}
+
+func isStaticTemp(name string) bool {
+ if i := strings.LastIndex(name, "/"); i >= 0 {
+ name = name[i:]
+ }
+ return strings.Contains(name, "..stmp_")
+}
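A few worked inputs make the matching rule concrete: only the portion after the last "/" is inspected, and it must contain "..stmp_". A minimal sketch of a test, assuming it sits alongside isStaticTemp in package ld (the symbol names are hypothetical):

	func TestIsStaticTemp(t *testing.T) {
		cases := map[string]bool{
			"runtime..stmp_12":         true,  // compiler-generated static temp
			"example.com/pkg..stmp_0":  true,  // path prefix is ignored
			"example.com/a..stmp_/b.V": false, // "..stmp_" must follow the last "/"
			"main.stmp_0":              false, // user variable: no ".." before stmp_
		}
		for name, want := range cases {
			if got := isStaticTemp(name); got != want {
				t.Errorf("isStaticTemp(%q) = %v, want %v", name, got, want)
			}
		}
	}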
diff --git a/src/cmd/link/internal/loadxcoff/ldxcoff.go b/src/cmd/link/internal/loadxcoff/ldxcoff.go
new file mode 100644
index 0000000000..7204d34388
--- /dev/null
+++ b/src/cmd/link/internal/loadxcoff/ldxcoff.go
@@ -0,0 +1,225 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package loadxcoff implements a XCOFF file reader.
+package loadxcoff
+
+import (
+ "cmd/internal/bio"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+ "cmd/internal/xcoff"
+ "cmd/link/internal/sym"
+ "errors"
+ "fmt"
+)
+
+// ldSection is an XCOFF section with its symbols.
+type ldSection struct {
+ xcoff.Section
+ sym *sym.Symbol
+}
+
+// TODO(brainman): maybe just add ReadAt method to bio.Reader instead of creating xcoffBiobuf
+
+// xcoffBiobuf makes bio.Reader look like io.ReaderAt.
+type xcoffBiobuf bio.Reader
+
+func (f *xcoffBiobuf) ReadAt(p []byte, off int64) (int, error) {
+ ret := ((*bio.Reader)(f)).Seek(off, 0)
+ if ret < 0 {
+ return 0, errors.New("failed to seek")
+ }
+ n, err := f.Read(p)
+ if err != nil {
+ return 0, err
+ }
+ return n, nil
+}
+
+// Load loads the XCOFF file pn from input.
+// Symbols are written into syms, and a slice of the text symbols is returned.
+func Load(arch *sys.Arch, syms *sym.Symbols, input *bio.Reader, pkg string, length int64, pn string) (textp []*sym.Symbol, err error) {
+ errorf := func(str string, args ...interface{}) ([]*sym.Symbol, error) {
+ return nil, fmt.Errorf("loadxcoff: %v: %v", pn, fmt.Sprintf(str, args...))
+ }
+ localSymVersion := syms.IncVersion()
+
+ var ldSections []*ldSection
+
+ f, err := xcoff.NewFile((*xcoffBiobuf)(input))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ for _, sect := range f.Sections {
+ // Only text, data and bss sections are loaded.
+ if sect.Type < xcoff.STYP_TEXT || sect.Type > xcoff.STYP_BSS {
+ continue
+ }
+ lds := new(ldSection)
+ lds.Section = *sect
+ name := fmt.Sprintf("%s(%s)", pkg, lds.Name)
+ s := syms.Lookup(name, localSymVersion)
+
+ switch lds.Type {
+ default:
+ return errorf("unrecognized section type 0x%x", lds.Type)
+ case xcoff.STYP_TEXT:
+ s.Type = sym.STEXT
+ case xcoff.STYP_DATA:
+ s.Type = sym.SNOPTRDATA
+ case xcoff.STYP_BSS:
+ s.Type = sym.SNOPTRBSS
+ }
+
+ s.Size = int64(lds.Size)
+ if s.Type != sym.SNOPTRBSS {
+ data, err := lds.Section.Data()
+ if err != nil {
+ return nil, err
+ }
+ s.P = data
+ }
+
+ lds.sym = s
+ ldSections = append(ldSections, lds)
+ }
+
+ // sx = symbol from file
+ // s = symbol for syms
+ for _, sx := range f.Symbols {
+ // get symbol type
+ stype, errmsg := getSymbolType(f, sx)
+ if errmsg != "" {
+ return errorf("error reading symbol %s: %s", sx.Name, errmsg)
+ }
+ if stype == sym.Sxxx {
+ continue
+ }
+
+ s := syms.Lookup(sx.Name, 0)
+
+ // Text symbol
+ if s.Type == sym.STEXT {
+ if s.Attr.OnList() {
+ return errorf("symbol %s listed multiple times", s.Name)
+ }
+ s.Attr |= sym.AttrOnList
+ textp = append(textp, s)
+ }
+ }
+
+ // Read relocations
+ for _, sect := range ldSections {
+ // TODO(aix): Dwarf section relocation if needed
+ if sect.Type != xcoff.STYP_TEXT && sect.Type != xcoff.STYP_DATA {
+ continue
+ }
+ rs := make([]sym.Reloc, sect.Nreloc)
+ for i, rx := range sect.Relocs {
+ r := &rs[i]
+
+ r.Sym = syms.Lookup(rx.Symbol.Name, 0)
+ if uint64(int32(rx.VirtualAddress)) != rx.VirtualAddress {
+ return errorf("virtual address of a relocation is too big: 0x%x", rx.VirtualAddress)
+ }
+ r.Off = int32(rx.VirtualAddress)
+ switch rx.Type {
+ default:
+ return errorf("section %s: unknown relocation of type 0x%x", sect.Name, rx.Type)
+ case xcoff.R_POS:
+ // Reloc the address of r.Sym
+ // Length should be 64
+ if rx.Length != 64 {
+ return errorf("section %s: relocation R_POS has length different from 64: %d", sect.Name, rx.Length)
+ }
+ r.Siz = 8
+ r.Type = objabi.R_CONST
+ r.Add = int64(rx.Symbol.Value)
+
+ case xcoff.R_RBR:
+ r.Siz = 4
+ r.Type = objabi.R_CALLPOWER
+ r.Add = 0
+
+ }
+ }
+ s := sect.sym
+ s.R = rs
+ }
+ return textp, nil
+}
+
+// getSymbolType converts an XCOFF symbol type to a sym.SymKind.
+// It returns sym.Sxxx if the symbol should not be added to syms (like .file or .dw symbols).
+func getSymbolType(f *xcoff.File, s *xcoff.Symbol) (stype sym.SymKind, err string) {
+ // .file symbol
+ if s.SectionNumber == -2 {
+ if s.StorageClass == xcoff.C_FILE {
+ return sym.Sxxx, ""
+ }
+ return sym.Sxxx, "unrecognised StorageClass for sectionNumber = -2"
+ }
+
+ // extern symbols
+ // TODO(aix)
+ if s.SectionNumber == 0 {
+ return sym.Sxxx, ""
+ }
+
+ sectType := f.Sections[s.SectionNumber-1].SectionHeader.Type
+ switch sectType {
+ default:
+ return sym.Sxxx, fmt.Sprintf("getSymbolType for Section type 0x%x not implemented", sectType)
+ case xcoff.STYP_DWARF, xcoff.STYP_DEBUG:
+ return sym.Sxxx, ""
+ case xcoff.STYP_DATA, xcoff.STYP_BSS, xcoff.STYP_TEXT:
+ }
+
+ switch s.StorageClass {
+ default:
+ return sym.Sxxx, fmt.Sprintf("getSymbolType for Storage class 0x%x not implemented", s.StorageClass)
+ case xcoff.C_HIDEXT, xcoff.C_EXT, xcoff.C_WEAKEXT:
+ switch s.AuxCSect.StorageMappingClass {
+ default:
+ return sym.Sxxx, fmt.Sprintf("getSymbolType for Storage class 0x%x and Storage Map 0x%x not implemented", s.StorageClass, s.AuxCSect.StorageMappingClass)
+
+ // Program Code
+ case xcoff.XMC_PR:
+ if sectType == xcoff.STYP_TEXT {
+ return sym.STEXT, ""
+ }
+ return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_PR", sectType, s.StorageClass)
+
+ // Read/Write Data
+ case xcoff.XMC_RW:
+ if sectType == xcoff.STYP_DATA {
+ return sym.SDATA, ""
+ }
+ if sectType == xcoff.STYP_BSS {
+ return sym.SBSS, ""
+ }
+ return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_RW", sectType, s.StorageClass)
+
+ // Function descriptor
+ case xcoff.XMC_DS:
+ if sectType == xcoff.STYP_DATA {
+ return sym.SDATA, ""
+ }
+ return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_DS", sectType, s.StorageClass)
+
+ // TOC anchor and TOC entry
+ case xcoff.XMC_TC0, xcoff.XMC_TE:
+ if sectType == xcoff.STYP_DATA {
+ return sym.SXCOFFTOC, ""
+ }
+ return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_TC0/XMC_TE", sectType, s.StorageClass)
+ }
+ }
+}
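xcoffBiobuf above is an instance of a common adapter: turning a seek-based reader into an io.ReaderAt. The general shape over the standard io.ReadSeeker interface, as an illustrative sketch (not part of the patch; io.ReadFull is used so short reads satisfy the io.ReaderAt contract):

	// seekReaderAt adapts an io.ReadSeeker to io.ReaderAt. It is not safe
	// for concurrent use, since Seek mutates the underlying offset.
	type seekReaderAt struct{ rs io.ReadSeeker }

	func (r seekReaderAt) ReadAt(p []byte, off int64) (int, error) {
		if _, err := r.rs.Seek(off, io.SeekStart); err != nil {
			return 0, err
		}
		// io.ReaderAt promises len(p) bytes or a non-nil error;
		// io.ReadFull converts a short read into io.ErrUnexpectedEOF.
		return io.ReadFull(r.rs, p)
	}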
diff --git a/src/cmd/link/internal/sym/symkind.go b/src/cmd/link/internal/sym/symkind.go
index 2e21cc1f00..b1756d6145 100644
--- a/src/cmd/link/internal/sym/symkind.go
+++ b/src/cmd/link/internal/sym/symkind.go
@@ -92,6 +92,7 @@ const (
SBSS
SNOPTRBSS
STLSBSS
+ SXCOFFTOC
SXREF
SMACHOSYMSTR
SMACHOSYMTAB
diff --git a/src/cmd/link/internal/sym/symkind_string.go b/src/cmd/link/internal/sym/symkind_string.go
index e7e56c4003..7428503b1c 100644
--- a/src/cmd/link/internal/sym/symkind_string.go
+++ b/src/cmd/link/internal/sym/symkind_string.go
@@ -4,9 +4,9 @@ package sym
import "strconv"
-const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASBSSSNOPTRBSSSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISC"
+const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASBSSSNOPTRBSSSTLSBSSSXCOFFTOCSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISC"
-var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 271, 280, 287, 292, 304, 316, 333, 350, 359, 365, 375, 383, 393, 403, 414, 423, 433}
+var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 271, 280, 287, 296, 301, 313, 325, 342, 359, 368, 374, 384, 392, 402, 412, 423, 432, 442}
func (i SymKind) String() string {
if i >= SymKind(len(_SymKind_index)-1) {
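The regenerated tables work by slicing one concatenated name string: for a value i, String returns _SymKind_name[_SymKind_index[i]:_SymKind_index[i+1]]. A worked check for the new constant, which is SymKind 34, immediately after STLSBSS:

	s := _SymKind_name[_SymKind_index[SXCOFFTOC]:_SymKind_index[SXCOFFTOC+1]]
	// _SymKind_index[34] == 287 and _SymKind_index[35] == 296, so
	// s == "SXCOFFTOC" (9 bytes); every later index shifts up by 9.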
diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go
index 6e885121c8..cf91e4d596 100644
--- a/src/cmd/vet/main.go
+++ b/src/cmd/vet/main.go
@@ -22,6 +22,7 @@ import (
"go/types"
"io"
"io/ioutil"
+ "log"
"os"
"path/filepath"
"sort"
@@ -31,10 +32,9 @@ import (
"cmd/internal/objabi"
)
-// Important! If you add flags here, make sure to update cmd/go/internal/vet/vetflag.go.
-
var (
verbose = flag.Bool("v", false, "verbose")
+ flags = flag.Bool("flags", false, "print flags in JSON")
source = flag.Bool("source", false, "import from source instead of compiled object files")
tags = flag.String("tags", "", "space-separated list of build tags to apply when parsing")
tagList = []string{} // exploded version of tags flag; set in main
@@ -259,6 +259,32 @@ func main() {
flag.Usage = Usage
flag.Parse()
+ // -flags: print flags as JSON. Used by go vet.
+ if *flags {
+ type jsonFlag struct {
+ Name string
+ Bool bool
+ Usage string
+ }
+ var jsonFlags []jsonFlag
+ flag.VisitAll(func(f *flag.Flag) {
+ isBool := false
+ switch v := f.Value.(type) {
+ case interface{ IsBoolFlag() bool }:
+ isBool = v.IsBoolFlag()
+ case *triState:
+ isBool = true // go vet should treat it as boolean
+ }
+ jsonFlags = append(jsonFlags, jsonFlag{f.Name, isBool, f.Usage})
+ })
+ data, err := json.MarshalIndent(jsonFlags, "", "\t")
+ if err != nil {
+ log.Fatal(err)
+ }
+ os.Stdout.Write(data)
+ os.Exit(0)
+ }
+
// If any flag is set, we run only those checks requested.
// If all flag is set true or if no flags are set true, set all the non-experimental ones
// not explicitly set (in effect, set the "-all" flag).
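With this, go vet can query the tool for its flag set instead of hard-coding it in cmd/go/internal/vet/vetflag.go. A sketch of the output shape for the -v flag defined above, with the other entries elided:

	$ go tool vet -flags
	[
		{
			"Name": "v",
			"Bool": true,
			"Usage": "verbose"
		}
	]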
diff --git a/src/cmd/vet/testdata/structtag.go b/src/cmd/vet/testdata/structtag.go
index ad55c4ab64..755d52be84 100644
--- a/src/cmd/vet/testdata/structtag.go
+++ b/src/cmd/vet/testdata/structtag.go
@@ -59,7 +59,7 @@ type DuplicateJSONFields struct {
NonJSON int `foo:"a"`
DuplicateNonJSON int `foo:"a"`
Embedded struct {
- DuplicateJSON int `json:"a"` // OK because its not in the same struct type
+ DuplicateJSON int `json:"a"` // OK because it's not in the same struct type
}
AnonymousJSON `json:"a"` // ERROR "struct field AnonymousJSON repeats json tag .a. also at structtag.go:52"
@@ -75,7 +75,7 @@ type DuplicateJSONFields struct {
NonXML int `foo:"a"`
DuplicateNonXML int `foo:"a"`
Embedded2 struct {
- DuplicateXML int `xml:"a"` // OK because its not in the same struct type
+ DuplicateXML int `xml:"a"` // OK because it's not in the same struct type
}
AnonymousXML `xml:"a"` // ERROR "struct field AnonymousXML repeats xml tag .a. also at structtag.go:68"
Attribute struct {
diff --git a/src/cmd/vet/testdata/unmarshal.go b/src/cmd/vet/testdata/unmarshal.go
new file mode 100644
index 0000000000..f541b4a414
--- /dev/null
+++ b/src/cmd/vet/testdata/unmarshal.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the unmarshal checker.
+
+package testdata
+
+import (
+ "encoding/gob"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+)
+
+func _() {
+ type t struct {
+ a int
+ }
+ var v t
+ var r io.Reader
+
+ json.Unmarshal([]byte{}, v) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ json.Unmarshal([]byte{}, &v)
+ json.NewDecoder(r).Decode(v) // ERROR "call of Decode passes non-pointer"
+ json.NewDecoder(r).Decode(&v)
+ gob.NewDecoder(r).Decode(v) // ERROR "call of Decode passes non-pointer"
+ gob.NewDecoder(r).Decode(&v)
+ xml.Unmarshal([]byte{}, v) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ xml.Unmarshal([]byte{}, &v)
+ xml.NewDecoder(r).Decode(v) // ERROR "call of Decode passes non-pointer"
+ xml.NewDecoder(r).Decode(&v)
+
+ var p *t
+ json.Unmarshal([]byte{}, p)
+ json.Unmarshal([]byte{}, *p) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ json.NewDecoder(r).Decode(p)
+ json.NewDecoder(r).Decode(*p) // ERROR "call of Decode passes non-pointer"
+ gob.NewDecoder(r).Decode(p)
+ gob.NewDecoder(r).Decode(*p) // ERROR "call of Decode passes non-pointer"
+ xml.Unmarshal([]byte{}, p)
+ xml.Unmarshal([]byte{}, *p) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ xml.NewDecoder(r).Decode(p)
+ xml.NewDecoder(r).Decode(*p) // ERROR "call of Decode passes non-pointer"
+
+ var i interface{}
+ json.Unmarshal([]byte{}, i)
+ json.NewDecoder(r).Decode(i)
+
+ json.Unmarshal([]byte{}, nil) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ json.Unmarshal([]byte{}, []t{}) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ json.Unmarshal([]byte{}, map[string]int{}) // ERROR "call of Unmarshal passes non-pointer as second argument"
+ json.NewDecoder(r).Decode(nil) // ERROR "call of Decode passes non-pointer"
+ json.NewDecoder(r).Decode([]t{}) // ERROR "call of Decode passes non-pointer"
+ json.NewDecoder(r).Decode(map[string]int{}) // ERROR "call of Decode passes non-pointer"
+
+ json.Unmarshal(func() ([]byte, interface{}) { return []byte{}, v }())
+}
diff --git a/src/cmd/vet/unmarshal.go b/src/cmd/vet/unmarshal.go
new file mode 100644
index 0000000000..3e4c25b6b9
--- /dev/null
+++ b/src/cmd/vet/unmarshal.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines the check for passing non-pointer or non-interface
+// types to unmarshal and decode functions.
+
+package main
+
+import (
+ "go/ast"
+ "go/types"
+ "strings"
+)
+
+func init() {
+ register("unmarshal",
+ "check for passing non-pointer or non-interface types to unmarshal and decode functions",
+ checkUnmarshalArg,
+ callExpr)
+}
+
+var pointerArgFuncs = map[string]int{
+ "encoding/json.Unmarshal": 1,
+ "(*encoding/json.Decoder).Decode": 0,
+ "(*encoding/gob.Decoder).Decode": 0,
+ "encoding/xml.Unmarshal": 1,
+ "(*encoding/xml.Decoder).Decode": 0,
+}
+
+func checkUnmarshalArg(f *File, n ast.Node) {
+ call, ok := n.(*ast.CallExpr)
+ if !ok {
+ return // not a call statement
+ }
+ fun := unparen(call.Fun)
+
+ if f.pkg.types[fun].IsType() {
+ return // a conversion, not a call
+ }
+
+ info := &types.Info{Uses: f.pkg.uses, Selections: f.pkg.selectors}
+ name := callName(info, call)
+
+ arg, ok := pointerArgFuncs[name]
+ if !ok {
+ return // not a function we are interested in
+ }
+
+ if len(call.Args) < arg+1 {
+ return // not enough arguments, e.g. called with return values of another function
+ }
+
+ typ := f.pkg.types[call.Args[arg]]
+
+ if typ.Type == nil {
+ return // type error prevents further analysis
+ }
+
+ switch typ.Type.Underlying().(type) {
+ case *types.Pointer, *types.Interface:
+ return
+ }
+
+ shortname := name[strings.LastIndexByte(name, '.')+1:]
+ switch arg {
+ case 0:
+ f.Badf(call.Lparen, "call of %s passes non-pointer", shortname)
+ case 1:
+ f.Badf(call.Lparen, "call of %s passes non-pointer as second argument", shortname)
+ }
+}
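The table drives the check: the map value is the index of the argument that must be a pointer or interface. The class of bug it catches, and the fix, in a minimal sketch (config and the JSON literal are illustrative):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type config struct{ Port int }

	func main() {
		data := []byte(`{"Port": 8080}`)
		var c config
		// json.Unmarshal(data, c) would be flagged by the new check:
		// it would fill in a copy that is immediately discarded.
		if err := json.Unmarshal(data, &c); err != nil { // pointer: c is updated
			panic(err)
		}
		fmt.Println(c.Port) // 8080
	}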
diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go
index df84d6cc98..da5a6ed87c 100644
--- a/src/cmd/vet/vet_test.go
+++ b/src/cmd/vet/vet_test.go
@@ -118,11 +118,12 @@ func TestVetPrint(t *testing.T) {
Build(t)
file := filepath.Join("testdata", "print.go")
cmd := exec.Command(
- "go", "vet", "-vettool="+binary,
+ "go", "vet",
"-printf",
"-printfuncs=Warn:1,Warnf:1",
file,
)
+ cmd.Env = append(os.Environ(), "GOVETTOOL="+binary)
errchk(cmd, []string{file}, t)
}
@@ -232,10 +233,10 @@ func TestVetVerbose(t *testing.T) {
// this function will report an error.
// Likewise if outStr does not have an error for a line which has a comment,
// or if the error message does not match the <regexp>.
-// The syntax is Perl but its best to stick to egrep.
+// The syntax is Perl but it's best to stick to egrep.
//
// Sources files are supplied as fullshort slice.
-// It consists of pairs: full path to source file and it's base name.
+// It consists of pairs: full path to source file and its base name.
func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) {
var errs []error
out := splitOutput(outStr, wantAuto)
diff --git a/src/compress/flate/inflate.go b/src/compress/flate/inflate.go
index 25e81f3f72..685be70a3e 100644
--- a/src/compress/flate/inflate.go
+++ b/src/compress/flate/inflate.go
@@ -65,7 +65,7 @@ func (e *WriteError) Error() string {
return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}
-// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
diff --git a/src/compress/zlib/reader.go b/src/compress/zlib/reader.go
index 2efa193035..a195b380d8 100644
--- a/src/compress/zlib/reader.go
+++ b/src/compress/zlib/reader.go
@@ -51,7 +51,7 @@ type reader struct {
scratch [4]byte
}
-// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
diff --git a/src/crypto/aes/aes_test.go b/src/crypto/aes/aes_test.go
index bedc2da946..1e8bac4bb5 100644
--- a/src/crypto/aes/aes_test.go
+++ b/src/crypto/aes/aes_test.go
@@ -231,12 +231,10 @@ L:
continue L
}
}
- if dec != nil {
- for j, v := range dec {
- if v != tt.dec[j] {
- t.Errorf("key %d: dec[%d] = %#x, want %#x", i, j, v, tt.dec[j])
- continue L
- }
+ for j, v := range dec {
+ if v != tt.dec[j] {
+ t.Errorf("key %d: dec[%d] = %#x, want %#x", i, j, v, tt.dec[j])
+ continue L
}
}
}
diff --git a/src/crypto/rand/eagain.go b/src/crypto/rand/eagain.go
index 7ed2f47ea6..045d037d20 100644
--- a/src/crypto/rand/eagain.go
+++ b/src/crypto/rand/eagain.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package rand
diff --git a/src/crypto/rand/rand_freebsd.go b/src/crypto/rand/rand_freebsd.go
index b4d6653343..75f683c386 100644
--- a/src/crypto/rand/rand_freebsd.go
+++ b/src/crypto/rand/rand_freebsd.go
@@ -6,4 +6,4 @@ package rand
// maxGetRandomRead is the maximum number of bytes to ask for in one call to the
// getrandom() syscall. In FreeBSD at most 256 bytes will be returned per call.
-const maxGetRandomRead = (1 << 8)
+const maxGetRandomRead = 1 << 8
diff --git a/src/crypto/rand/rand_unix.go b/src/crypto/rand/rand_unix.go
index 272b83d52b..80c8eaf97b 100644
--- a/src/crypto/rand/rand_unix.go
+++ b/src/crypto/rand/rand_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris
+// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris
// Unix cryptographically secure pseudorandom number
// generator.
diff --git a/src/crypto/tls/auth.go b/src/crypto/tls/auth.go
index 88face4cde..a27db45b01 100644
--- a/src/crypto/tls/auth.go
+++ b/src/crypto/tls/auth.go
@@ -23,10 +23,9 @@ import (
func pickSignatureAlgorithm(pubkey crypto.PublicKey, peerSigAlgs, ourSigAlgs []SignatureScheme, tlsVersion uint16) (sigAlg SignatureScheme, sigType uint8, hashFunc crypto.Hash, err error) {
if tlsVersion < VersionTLS12 || len(peerSigAlgs) == 0 {
// For TLS 1.1 and before, the signature algorithm could not be
- // negotiated and the hash is fixed based on the signature type.
- // For TLS 1.2, if the client didn't send signature_algorithms
- // extension then we can assume that it supports SHA1. See
- // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
+ // negotiated and the hash is fixed based on the signature type. For TLS
+ // 1.2, if the client didn't send signature_algorithms extension then we
+ // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1.
switch pubkey.(type) {
case *rsa.PublicKey:
if tlsVersion < VersionTLS12 {
diff --git a/src/crypto/tls/cipher_suites.go b/src/crypto/tls/cipher_suites.go
index e4027e8ab4..2475906ae1 100644
--- a/src/crypto/tls/cipher_suites.go
+++ b/src/crypto/tls/cipher_suites.go
@@ -14,9 +14,8 @@ import (
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
- "hash"
-
"golang_org/x/crypto/chacha20poly1305"
+ "hash"
)
// a keyAgreement implements the client and server side of a TLS key agreement
@@ -140,25 +139,29 @@ func macSHA1(version uint16, key []byte) macFunction {
if !boring.Enabled {
h = newConstantTimeHash(h)
}
- return tls10MAC{hmac.New(h, key)}
+ return tls10MAC{h: hmac.New(h, key)}
}
// macSHA256 returns a SHA-256 based MAC. These are only supported in TLS 1.2
// so the given version is ignored.
func macSHA256(version uint16, key []byte) macFunction {
- return tls10MAC{hmac.New(sha256.New, key)}
+ return tls10MAC{h: hmac.New(sha256.New, key)}
}
type macFunction interface {
+ // Size returns the length of the MAC.
Size() int
- MAC(digestBuf, seq, header, data, extra []byte) []byte
+ // MAC computes the MAC of (seq, header, data) into a reused internal
+ // buffer and returns it. The extra data is fed into the MAC after
+ // obtaining the result to normalize timing. The result is only valid
+ // until the next invocation of MAC as the buffer is reused.
+ MAC(seq, header, data, extra []byte) []byte
}
type aead interface {
cipher.AEAD
- // explicitIVLen returns the number of bytes used by the explicit nonce
- // that is included in the record. This is eight for older AEADs and
+ // explicitNonceLen returns the number of bytes of explicit nonce
+ // included in each record. This is eight for older AEADs and
// zero for modern ones.
explicitNonceLen() int
}
@@ -261,6 +264,7 @@ func aeadChaCha20Poly1305(key, fixedNonce []byte) cipher.AEAD {
type ssl30MAC struct {
h hash.Hash
key []byte
+ buf []byte
}
func (s ssl30MAC) Size() int {
@@ -273,7 +277,7 @@ var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0
// MAC does not offer constant timing guarantees for SSL v3.0, since it's deemed
// useless considering the similar, protocol-level POODLE vulnerability.
-func (s ssl30MAC) MAC(digestBuf, seq, header, data, extra []byte) []byte {
+func (s ssl30MAC) MAC(seq, header, data, extra []byte) []byte {
padLength := 48
if s.h.Size() == 20 {
padLength = 40
@@ -286,13 +290,13 @@ func (s ssl30MAC) MAC(digestBuf, seq, header, data, extra []byte) []byte {
s.h.Write(header[:1])
s.h.Write(header[3:5])
s.h.Write(data)
- digestBuf = s.h.Sum(digestBuf[:0])
+ s.buf = s.h.Sum(s.buf[:0])
s.h.Reset()
s.h.Write(s.key)
s.h.Write(ssl30Pad2[:padLength])
- s.h.Write(digestBuf)
- return s.h.Sum(digestBuf[:0])
+ s.h.Write(s.buf)
+ return s.h.Sum(s.buf[:0])
}
type constantTimeHash interface {
@@ -323,9 +327,10 @@ func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
}
}
-// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, section 6.2.3.
+// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3.
type tls10MAC struct {
- h hash.Hash
+ h hash.Hash
+ buf []byte
}
func (s tls10MAC) Size() int {
@@ -335,12 +340,12 @@ func (s tls10MAC) Size() int {
// MAC is guaranteed to take constant time, as long as
// len(seq)+len(header)+len(data)+len(extra) is constant. extra is not fed into
// the MAC, but is only provided to make the timing profile constant.
-func (s tls10MAC) MAC(digestBuf, seq, header, data, extra []byte) []byte {
+func (s tls10MAC) MAC(seq, header, data, extra []byte) []byte {
s.h.Reset()
s.h.Write(seq)
s.h.Write(header)
s.h.Write(data)
- res := s.h.Sum(digestBuf[:0])
+ res := s.h.Sum(s.buf[:0])
if extra != nil {
s.h.Write(extra)
}
@@ -410,7 +415,6 @@ const (
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 uint16 = 0xcca9
// TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
- // that the client is doing version fallback. See
- // https://tools.ietf.org/html/rfc7507.
+ // that the client is doing version fallback. See RFC 7507.
TLS_FALLBACK_SCSV uint16 = 0x5600
)
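The new h/buf form lets each connection reuse one MAC buffer across records instead of threading digestBuf through every call. A standalone sketch of the same pattern, with assumed names rather than the patch's exact types:

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"fmt"
		"hash"
	)

	type recordMAC struct {
		h   hash.Hash
		buf []byte
	}

	func (m *recordMAC) MAC(seq, header, data, extra []byte) []byte {
		m.h.Reset()
		m.h.Write(seq)
		m.h.Write(header)
		m.h.Write(data)
		m.buf = m.h.Sum(m.buf[:0]) // reused: valid only until the next call
		// extra is hashed only so timing does not depend on the secret
		// padding length; the result of this Write is never used.
		m.h.Write(extra)
		return m.buf
	}

	func main() {
		m := &recordMAC{h: hmac.New(sha256.New, []byte("key"))}
		fmt.Printf("%x\n", m.MAC([]byte{0}, []byte("hdr"), []byte("data"), nil))
	}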
diff --git a/src/crypto/tls/common.go b/src/crypto/tls/common.go
index 5a27be2491..98d13b038d 100644
--- a/src/crypto/tls/common.go
+++ b/src/crypto/tls/common.go
@@ -80,7 +80,7 @@ const (
extensionSupportedPoints uint16 = 11
extensionSignatureAlgorithms uint16 = 13
extensionALPN uint16 = 16
- extensionSCT uint16 = 18 // https://tools.ietf.org/html/rfc6962#section-6
+ extensionSCT uint16 = 18 // RFC 6962, Section 6
extensionSessionTicket uint16 = 35
extensionNextProtoNeg uint16 = 13172 // not IANA assigned
extensionRenegotiationInfo uint16 = 0xff01
@@ -129,7 +129,7 @@ const (
)
// Signature algorithms (for internal signaling use). Starting at 16 to avoid overlap with
-// TLS 1.2 codepoints (RFC 5246, section A.4.1), with which these have nothing to do.
+// TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do.
const (
signaturePKCS1v15 uint8 = iota + 16
signatureECDSA
@@ -178,9 +178,9 @@ type ConnectionState struct {
}
// ExportKeyingMaterial returns length bytes of exported key material in a new
-// slice as defined in https://tools.ietf.org/html/rfc5705. If context is nil,
-// it is not used as part of the seed. If the connection was set to allow
-// renegotiation via Config.Renegotiation, this function will return an error.
+// slice as defined in RFC 5705. If context is nil, it is not used as part of
+// the seed. If the connection was set to allow renegotiation via
+// Config.Renegotiation, this function will return an error.
func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
return cs.ekm(label, context, length)
}
@@ -223,7 +223,7 @@ type ClientSessionCache interface {
}
// SignatureScheme identifies a signature algorithm supported by TLS. See
-// https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.3.
+// RFC 8446, Section 4.2.3.
type SignatureScheme uint16
const (
@@ -253,32 +253,27 @@ type ClientHelloInfo struct {
// ServerName indicates the name of the server requested by the client
// in order to support virtual hosting. ServerName is only set if the
- // client is using SNI (see
- // https://tools.ietf.org/html/rfc4366#section-3.1).
+ // client is using SNI (see RFC 4366, Section 3.1).
ServerName string
// SupportedCurves lists the elliptic curves supported by the client.
// SupportedCurves is set only if the Supported Elliptic Curves
- // Extension is being used (see
- // https://tools.ietf.org/html/rfc4492#section-5.1.1).
+ // Extension is being used (see RFC 4492, Section 5.1.1).
SupportedCurves []CurveID
// SupportedPoints lists the point formats supported by the client.
// SupportedPoints is set only if the Supported Point Formats Extension
- // is being used (see
- // https://tools.ietf.org/html/rfc4492#section-5.1.2).
+ // is being used (see RFC 4492, Section 5.1.2).
SupportedPoints []uint8
// SignatureSchemes lists the signature and hash schemes that the client
// is willing to verify. SignatureSchemes is set only if the Signature
- // Algorithms Extension is being used (see
- // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1).
+ // Algorithms Extension is being used (see RFC 5246, Section 7.4.1.4.1).
SignatureSchemes []SignatureScheme
// SupportedProtos lists the application protocols supported by the client.
// SupportedProtos is set only if the Application-Layer Protocol
- // Negotiation Extension is being used (see
- // https://tools.ietf.org/html/rfc7301#section-3.1).
+ // Negotiation Extension is being used (see RFC 7301, Section 3.1).
//
// Servers can select a protocol by setting Config.NextProtos in a
// GetConfigForClient return value.
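ExportKeyingMaterial is the RFC 5705 exporter. A minimal usage sketch over an established connection; the label and output length are illustrative:

	// exporterKey derives 32 bytes bound to this TLS session. It fails if
	// the connection allows renegotiation, since the exporter output would
	// no longer be unique to a single handshake.
	func exporterKey(conn *tls.Conn) ([]byte, error) {
		state := conn.ConnectionState()
		return state.ExportKeyingMaterial("EXPERIMENTAL-my-protocol", nil, 32)
	}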
diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go
index 6e27e695bd..13cebc9042 100644
--- a/src/crypto/tls/conn.go
+++ b/src/crypto/tls/conn.go
@@ -82,9 +82,10 @@ type Conn struct {
// input/output
in, out halfConn
- rawInput *block // raw input, right off the wire
- input *block // application data waiting to be read
+ rawInput bytes.Buffer // raw input, starting with a record header
+ input bytes.Reader // application data waiting to be read, from rawInput.Next
hand bytes.Buffer // handshake data waiting to be read
+ outBuf []byte // scratch buffer used by out.encrypt
buffering bool // whether records are buffered in sendBuf
sendBuf []byte // a buffer of records waiting to be sent
@@ -149,14 +150,10 @@ type halfConn struct {
cipher interface{} // cipher algorithm
mac macFunction
seq [8]byte // 64-bit sequence number
- bfree *block // list of free blocks
additionalData [13]byte // to avoid allocs; interface method args escape
nextCipher interface{} // next encryption state
nextMac macFunction // next MAC algorithm
-
- // used to save allocating a new buffer for each MAC.
- inDigestBuf, outDigestBuf []byte
}
func (hc *halfConn) setErrorLocked(err error) error {
@@ -203,9 +200,33 @@ func (hc *halfConn) incSeq() {
panic("TLS: sequence number wraparound")
}
+// explicitNonceLen returns the number of bytes of explicit nonce or IV included
+// in each record. Explicit nonces are present only in CBC modes after TLS 1.0
+// and in certain AEAD modes in TLS 1.2.
+func (hc *halfConn) explicitNonceLen() int {
+ if hc.cipher == nil {
+ return 0
+ }
+
+ switch c := hc.cipher.(type) {
+ case cipher.Stream:
+ return 0
+ case aead:
+ return c.explicitNonceLen()
+ case cbcMode:
+ // TLS 1.1 introduced a per-record explicit IV to fix the BEAST attack.
+ if hc.version >= VersionTLS11 {
+ return c.BlockSize()
+ }
+ return 0
+ default:
+ panic("unknown cipher type")
+ }
+}
+
// extractPadding returns, in constant time, the length of the padding to remove
// from the end of payload. It also returns a byte which is equal to 255 if the
-// padding was valid and 0 otherwise. See RFC 2246, section 6.2.3.2
+// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
func extractPadding(payload []byte) (toRemove int, good byte) {
if len(payload) < 1 {
return 0, 0
@@ -268,283 +289,189 @@ type cbcMode interface {
SetIV([]byte)
}
-// decrypt checks and strips the mac and decrypts the data in b. Returns a
-// success boolean, the number of bytes to skip from the start of the record in
-// order to get the application payload, and an optional alert value.
-func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, alertValue alert) {
- // pull out payload
- payload := b.data[recordHeaderLen:]
-
- macSize := 0
- if hc.mac != nil {
- macSize = hc.mac.Size()
- }
+// decrypt authenticates and decrypts the record if protection is active at
+// this stage. The returned plaintext might overlap with the input.
+func (hc *halfConn) decrypt(record []byte) (plaintext []byte, err error) {
+ payload := record[recordHeaderLen:]
paddingGood := byte(255)
paddingLen := 0
- explicitIVLen := 0
- // decrypt
+ explicitNonceLen := hc.explicitNonceLen()
+
if hc.cipher != nil {
switch c := hc.cipher.(type) {
case cipher.Stream:
c.XORKeyStream(payload, payload)
case aead:
- explicitIVLen = c.explicitNonceLen()
- if len(payload) < explicitIVLen {
- return false, 0, alertBadRecordMAC
+ if len(payload) < explicitNonceLen {
+ return nil, alertBadRecordMAC
}
- nonce := payload[:explicitIVLen]
- payload = payload[explicitIVLen:]
-
+ nonce := payload[:explicitNonceLen]
if len(nonce) == 0 {
nonce = hc.seq[:]
}
+ payload = payload[explicitNonceLen:]
copy(hc.additionalData[:], hc.seq[:])
- copy(hc.additionalData[8:], b.data[:3])
+ copy(hc.additionalData[8:], record[:3])
n := len(payload) - c.Overhead()
hc.additionalData[11] = byte(n >> 8)
hc.additionalData[12] = byte(n)
+
var err error
- payload, err = c.Open(payload[:0], nonce, payload, hc.additionalData[:])
+ plaintext, err = c.Open(payload[:0], nonce, payload, hc.additionalData[:])
if err != nil {
- return false, 0, alertBadRecordMAC
+ return nil, alertBadRecordMAC
}
- b.resize(recordHeaderLen + explicitIVLen + len(payload))
case cbcMode:
blockSize := c.BlockSize()
- if hc.version >= VersionTLS11 {
- explicitIVLen = blockSize
+ minPayload := explicitNonceLen + roundUp(hc.mac.Size()+1, blockSize) // TODO: vuln?
+ if len(payload)%blockSize != 0 || len(payload) < minPayload {
+ return nil, alertBadRecordMAC
}
- if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) {
- return false, 0, alertBadRecordMAC
- }
-
- if explicitIVLen > 0 {
- c.SetIV(payload[:explicitIVLen])
- payload = payload[explicitIVLen:]
+ if explicitNonceLen > 0 {
+ c.SetIV(payload[:explicitNonceLen])
+ payload = payload[explicitNonceLen:]
}
c.CryptBlocks(payload, payload)
+
+ // In a limited attempt to protect against CBC padding oracles like
+ // Lucky13, the data past paddingLen (which is secret) is passed to
+ // the MAC function as extra data, to be fed into the HMAC after
+ // computing the digest. This makes the MAC roughly constant time as
+ // long as the digest computation is constant time and does not
+ // affect the subsequent write, modulo cache effects.
if hc.version == VersionSSL30 {
paddingLen, paddingGood = extractPaddingSSL30(payload)
} else {
paddingLen, paddingGood = extractPadding(payload)
-
- // To protect against CBC padding oracles like Lucky13, the data
- // past paddingLen (which is secret) is passed to the MAC
- // function as extra data, to be fed into the HMAC after
- // computing the digest. This makes the MAC constant time as
- // long as the digest computation is constant time and does not
- // affect the subsequent write.
}
default:
panic("unknown cipher type")
}
+ } else {
+ plaintext = payload
}
- // check, strip mac
if hc.mac != nil {
+ macSize := hc.mac.Size()
if len(payload) < macSize {
- return false, 0, alertBadRecordMAC
+ return nil, alertBadRecordMAC
}
- // strip mac off payload, b.data
n := len(payload) - macSize - paddingLen
n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 }
- b.data[3] = byte(n >> 8)
- b.data[4] = byte(n)
+ record[3] = byte(n >> 8)
+ record[4] = byte(n)
remoteMAC := payload[n : n+macSize]
- localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], payload[:n], payload[n+macSize:])
+ localMAC := hc.mac.MAC(hc.seq[0:], record[:recordHeaderLen], payload[:n], payload[n+macSize:])
if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 {
- return false, 0, alertBadRecordMAC
+ return nil, alertBadRecordMAC
}
- hc.inDigestBuf = localMAC
- b.resize(recordHeaderLen + explicitIVLen + n)
+ plaintext = payload[:n]
}
- hc.incSeq()
- return true, recordHeaderLen + explicitIVLen, 0
+ hc.incSeq()
+ return plaintext, nil
}
-// padToBlockSize calculates the needed padding block, if any, for a payload.
-// On exit, prefix aliases payload and extends to the end of the last full
-// block of payload. finalBlock is a fresh slice which contains the contents of
-// any suffix of payload as well as the needed padding to make finalBlock a
-// full block.
-func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) {
- overrun := len(payload) % blockSize
- paddingLen := blockSize - overrun
- prefix = payload[:len(payload)-overrun]
- finalBlock = make([]byte, blockSize)
- copy(finalBlock, payload[len(payload)-overrun:])
- for i := overrun; i < blockSize; i++ {
- finalBlock[i] = byte(paddingLen - 1)
+// sliceForAppend extends the input slice by n bytes. head is the full extended
+// slice, while tail is the appended part. If the original slice has sufficient
+// capacity no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
}
+ tail = head[len(in):]
return
}
-// encrypt encrypts and macs the data in b.
-func (hc *halfConn) encrypt(b *block, explicitIVLen int) (bool, alert) {
- // mac
+// encrypt encrypts payload, adding the appropriate nonce and/or MAC, and
+// appends it to record, which contains the record header.
+func (hc *halfConn) encrypt(record, payload []byte, rand io.Reader) ([]byte, error) {
+ if hc.cipher == nil {
+ return append(record, payload...), nil
+ }
+
+ var explicitNonce []byte
+ if explicitNonceLen := hc.explicitNonceLen(); explicitNonceLen > 0 {
+ record, explicitNonce = sliceForAppend(record, explicitNonceLen)
+ if _, isCBC := hc.cipher.(cbcMode); !isCBC && explicitNonceLen < 16 {
+ // The AES-GCM construction in TLS has an explicit nonce so that the
+ // nonce can be random. However, the nonce is only 8 bytes, which is
+ // too small for a secure, random nonce. Therefore we use the
+ // sequence number as the nonce. The 3DES-CBC construction also has
+ // an 8-byte nonce but its nonces must be unpredictable (see RFC
+ // 5246, Appendix F.3), forcing us to use randomness. That's not
+ // 3DES' biggest problem anyway because the birthday bound on block
+ // collision is reached first due to its similarly small block size
+ // (see the Sweet32 attack).
+ copy(explicitNonce, hc.seq[:])
+ } else {
+ if _, err := io.ReadFull(rand, explicitNonce); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var mac []byte
if hc.mac != nil {
- mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:], nil)
-
- n := len(b.data)
- b.resize(n + len(mac))
- copy(b.data[n:], mac)
- hc.outDigestBuf = mac
+ mac = hc.mac.MAC(hc.seq[:], record[:recordHeaderLen], payload, nil)
}
- payload := b.data[recordHeaderLen:]
-
- // encrypt
- if hc.cipher != nil {
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- c.XORKeyStream(payload, payload)
- case aead:
- payloadLen := len(b.data) - recordHeaderLen - explicitIVLen
- b.resize(len(b.data) + c.Overhead())
- nonce := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
- payload := b.data[recordHeaderLen+explicitIVLen:]
- payload = payload[:payloadLen]
-
- copy(hc.additionalData[:], hc.seq[:])
- copy(hc.additionalData[8:], b.data[:3])
- hc.additionalData[11] = byte(payloadLen >> 8)
- hc.additionalData[12] = byte(payloadLen)
-
- c.Seal(payload[:0], nonce, payload, hc.additionalData[:])
- case cbcMode:
- blockSize := c.BlockSize()
- if explicitIVLen > 0 {
- c.SetIV(payload[:explicitIVLen])
- payload = payload[explicitIVLen:]
- }
- prefix, finalBlock := padToBlockSize(payload, blockSize)
- b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock))
- c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix)
- c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock)
- default:
- panic("unknown cipher type")
+ var dst []byte
+ switch c := hc.cipher.(type) {
+ case cipher.Stream:
+ record, dst = sliceForAppend(record, len(payload)+len(mac))
+ c.XORKeyStream(dst[:len(payload)], payload)
+ c.XORKeyStream(dst[len(payload):], mac)
+ case aead:
+ nonce := explicitNonce
+ if len(nonce) == 0 {
+ nonce = hc.seq[:]
}
+
+ copy(hc.additionalData[:], hc.seq[:])
+ copy(hc.additionalData[8:], record[:3])
+ hc.additionalData[11] = byte(len(payload) >> 8)
+ hc.additionalData[12] = byte(len(payload))
+
+ record = c.Seal(record, nonce, payload, hc.additionalData[:])
+ case cbcMode:
+ blockSize := c.BlockSize()
+ plaintextLen := len(payload) + len(mac)
+ paddingLen := blockSize - plaintextLen%blockSize
+ record, dst = sliceForAppend(record, plaintextLen+paddingLen)
+ copy(dst, payload)
+ copy(dst[len(payload):], mac)
+ for i := plaintextLen; i < len(dst); i++ {
+ dst[i] = byte(paddingLen - 1)
+ }
+ if len(explicitNonce) > 0 {
+ c.SetIV(explicitNonce)
+ }
+ c.CryptBlocks(dst, dst)
+ default:
+ panic("unknown cipher type")
}
- // update length to include MAC and any block padding needed.
- n := len(b.data) - recordHeaderLen
- b.data[3] = byte(n >> 8)
- b.data[4] = byte(n)
+ // Update length to include nonce, MAC and any block padding needed.
+ n := len(record) - recordHeaderLen
+ record[3] = byte(n >> 8)
+ record[4] = byte(n)
hc.incSeq()
- return true, 0
+ return record, nil
}
-// A block is a simple data buffer.
-type block struct {
- data []byte
- off int // index for Read
- link *block
-}
-
-// resize resizes block to be n bytes, growing if necessary.
-func (b *block) resize(n int) {
- if n > cap(b.data) {
- b.reserve(n)
- }
- b.data = b.data[0:n]
-}
-
-// reserve makes sure that block contains a capacity of at least n bytes.
-func (b *block) reserve(n int) {
- if cap(b.data) >= n {
- return
- }
- m := cap(b.data)
- if m == 0 {
- m = 1024
- }
- for m < n {
- m *= 2
- }
- data := make([]byte, len(b.data), m)
- copy(data, b.data)
- b.data = data
-}
-
-// readFromUntil reads from r into b until b contains at least n bytes
-// or else returns an error.
-func (b *block) readFromUntil(r io.Reader, n int) error {
- // quick case
- if len(b.data) >= n {
- return nil
- }
-
- // read until have enough.
- b.reserve(n)
- for {
- m, err := r.Read(b.data[len(b.data):cap(b.data)])
- b.data = b.data[0 : len(b.data)+m]
- if len(b.data) >= n {
- // TODO(bradfitz,agl): slightly suspicious
- // that we're throwing away r.Read's err here.
- break
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (b *block) Read(p []byte) (n int, err error) {
- n = copy(p, b.data[b.off:])
- b.off += n
- return
-}
-
-// newBlock allocates a new block, from hc's free list if possible.
-func (hc *halfConn) newBlock() *block {
- b := hc.bfree
- if b == nil {
- return new(block)
- }
- hc.bfree = b.link
- b.link = nil
- b.resize(0)
- return b
-}
-
-// freeBlock returns a block to hc's free list.
-// The protocol is such that each side only has a block or two on
-// its free list at a time, so there's no need to worry about
-// trimming the list, etc.
-func (hc *halfConn) freeBlock(b *block) {
- b.link = hc.bfree
- hc.bfree = b
-}
-
-// splitBlock splits a block after the first n bytes,
-// returning a block with those n bytes and a
-// block with the remainder. the latter may be nil.
-func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) {
- if len(b.data) <= n {
- return b, nil
- }
- bb := hc.newBlock()
- bb.resize(len(b.data) - n)
- copy(bb.data, b.data[n:])
- b.data = b.data[0:n]
- return b, bb
-}
-
-// RecordHeaderError results when a TLS record header is invalid.
+// RecordHeaderError is returned when a TLS record header is invalid.
type RecordHeaderError struct {
// Msg contains a human readable string that describes the error.
Msg string
@@ -557,7 +484,7 @@ func (e RecordHeaderError) Error() string { return "tls: " + e.Msg }
func (c *Conn) newRecordHeaderError(msg string) (err RecordHeaderError) {
err.Msg = msg
- copy(err.RecordHeader[:], c.rawInput.data)
+ copy(err.RecordHeader[:], c.rawInput.Bytes())
return err
}
@@ -569,40 +496,38 @@ func (c *Conn) readRecord(want recordType) error {
// else application data.
switch want {
default:
- c.sendAlert(alertInternalError)
- return c.in.setErrorLocked(errors.New("tls: unknown record type requested"))
+ panic("tls: unknown record type requested")
case recordTypeHandshake, recordTypeChangeCipherSpec:
if c.handshakeComplete() {
- c.sendAlert(alertInternalError)
- return c.in.setErrorLocked(errors.New("tls: handshake or ChangeCipherSpec requested while not in handshake"))
+ panic("tls: handshake or ChangeCipherSpec requested while not in handshake")
}
case recordTypeApplicationData:
if !c.handshakeComplete() {
- c.sendAlert(alertInternalError)
- return c.in.setErrorLocked(errors.New("tls: application data record requested while in handshake"))
+ panic("tls: application data record requested while in handshake")
}
}
-Again:
- if c.rawInput == nil {
- c.rawInput = c.in.newBlock()
+ // This function modifies c.rawInput, which owns the c.input memory.
+ if c.input.Len() != 0 {
+ panic("tls: attempted to read record with pending application data")
}
- b := c.rawInput
+ c.input.Reset(nil)
// Read header, payload.
- if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil {
- // RFC suggests that EOF without an alertCloseNotify is
- // an error, but popular web sites seem to do this,
- // so we can't make it an error.
- // if err == io.EOF {
- // err = io.ErrUnexpectedEOF
- // }
+ if err := c.readFromUntil(c.conn, recordHeaderLen); err != nil {
+ // RFC 8446, Section 6.1 suggests that EOF without an alertCloseNotify
+ // is an error, but popular web sites seem to do this, so we accept it
+ // if and only if at the record boundary.
+ if err == io.ErrUnexpectedEOF && c.rawInput.Len() == 0 {
+ err = io.EOF
+ }
if e, ok := err.(net.Error); !ok || !e.Temporary() {
c.in.setErrorLocked(err)
}
return err
}
- typ := recordType(b.data[0])
+ hdr := c.rawInput.Bytes()[:recordHeaderLen]
+ typ := recordType(hdr[0])
// No valid TLS record has a type of 0x80, however SSLv2 handshakes
// start with a uint16 length where the MSB is set and the first record
@@ -613,8 +538,8 @@ Again:
return c.in.setErrorLocked(c.newRecordHeaderError("unsupported SSLv2 handshake received"))
}
- vers := uint16(b.data[1])<<8 | uint16(b.data[2])
- n := int(b.data[3])<<8 | int(b.data[4])
+ vers := uint16(hdr[1])<<8 | uint16(hdr[2])
+ n := int(hdr[3])<<8 | int(hdr[4])
if c.haveVers && vers != c.vers {
c.sendAlert(alertProtocolVersion)
msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers)
@@ -635,10 +560,7 @@ Again:
return c.in.setErrorLocked(c.newRecordHeaderError("first record does not look like a TLS handshake"))
}
}
- if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
+ if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
if e, ok := err.(net.Error); !ok || !e.Temporary() {
c.in.setErrorLocked(err)
}
@@ -646,18 +568,13 @@ Again:
}
// Process message.
- b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n)
- ok, off, alertValue := c.in.decrypt(b)
- if !ok {
- c.in.freeBlock(b)
- return c.in.setErrorLocked(c.sendAlert(alertValue))
+ record := c.rawInput.Next(recordHeaderLen + n)
+ data, err := c.in.decrypt(record)
+ if err != nil {
+ return c.in.setErrorLocked(c.sendAlert(err.(alert)))
}
- b.off = off
- data := b.data[b.off:]
if len(data) > maxPlaintext {
- err := c.sendAlert(alertRecordOverflow)
- c.in.freeBlock(b)
- return c.in.setErrorLocked(err)
+ return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow))
}
if typ != recordTypeAlert && len(data) > 0 {
@@ -667,70 +584,97 @@ Again:
switch typ {
default:
- c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
case recordTypeAlert:
if len(data) != 2 {
- c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- break
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
if alert(data[1]) == alertCloseNotify {
- c.in.setErrorLocked(io.EOF)
- break
+ return c.in.setErrorLocked(io.EOF)
}
switch data[0] {
case alertLevelWarning:
- // drop on the floor
- c.in.freeBlock(b)
-
c.warnCount++
if c.warnCount > maxWarnAlertCount {
c.sendAlert(alertUnexpectedMessage)
return c.in.setErrorLocked(errors.New("tls: too many warn alerts"))
}
-
- goto Again
+ return c.readRecord(want) // Drop the record on the floor and retry.
case alertLevelError:
- c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
+ return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
default:
- c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
case recordTypeChangeCipherSpec:
if typ != want || len(data) != 1 || data[0] != 1 {
- c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- break
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
- // Handshake messages are not allowed to fragment across the CCS
+ // Handshake messages are not allowed to fragment across the CCS.
if c.hand.Len() > 0 {
- c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- break
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
- err := c.in.changeCipherSpec()
- if err != nil {
- c.in.setErrorLocked(c.sendAlert(err.(alert)))
+ if err := c.in.changeCipherSpec(); err != nil {
+ return c.in.setErrorLocked(c.sendAlert(err.(alert)))
}
+ return nil
case recordTypeApplicationData:
if typ != want {
- c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- break
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
- c.input = b
- b = nil
+ // Note that data is owned by c.rawInput, following the Next call above,
+ // to avoid copying the plaintext. This is safe because c.rawInput is
+ // not read from or written to until c.input is drained.
+ c.input.Reset(data)
+ return nil
case recordTypeHandshake:
- // TODO(rsc): Should at least pick off connection close.
if typ != want && !(c.isClient && c.config.Renegotiation != RenegotiateNever) {
return c.in.setErrorLocked(c.sendAlert(alertNoRenegotiation))
}
c.hand.Write(data)
+ return nil
}
+}
- if b != nil {
- c.in.freeBlock(b)
+// atLeastReader reads from R, stopping with EOF once at least N bytes have been
+// read. It is different from an io.LimitedReader in that it doesn't cut short
+// the last Read call, and in that it considers an early EOF an error.
+type atLeastReader struct {
+ R io.Reader
+ N int64
+}
+
+func (r *atLeastReader) Read(p []byte) (int, error) {
+ if r.N <= 0 {
+ return 0, io.EOF
}
- return c.in.err
+ n, err := r.R.Read(p)
+ r.N -= int64(n) // won't underflow unless len(p) >= n > 9223372036854775809
+ if r.N > 0 && err == io.EOF {
+ return n, io.ErrUnexpectedEOF
+ }
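+ // Once the quota has been met, report EOF so that callers such as
+ // (*bytes.Buffer).ReadFrom stop without issuing another blocking Read.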
+ if r.N <= 0 && err == nil {
+ return n, io.EOF
+ }
+ return n, err
+}
+
+// readFromUntil reads from r into c.rawInput until c.rawInput contains
+// at least n bytes or else returns an error.
+func (c *Conn) readFromUntil(r io.Reader, n int) error {
+ if c.rawInput.Len() >= n {
+ return nil
+ }
+ needs := n - c.rawInput.Len()
+ // There might be extra input waiting on the wire. Make a best effort
+ // attempt to fetch it so that it can be used in (*Conn).Read to
+ // "predict" closeNotify alerts.
+ c.rawInput.Grow(needs + bytes.MinRead)
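+ // Growing the buffer up front also spares ReadFrom from repeatedly
+ // growing it in bytes.MinRead increments while reading.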
+ _, err := c.rawInput.ReadFrom(&atLeastReader{r, int64(needs)})
+ return err
}
// sendAlert sends a TLS alert message.
@@ -789,7 +733,7 @@ const (
//
// In the interests of simplicity and determinism, this code does not attempt
// to reset the record size once the connection is idle, however.
-func (c *Conn) maxPayloadSizeForWrite(typ recordType, explicitIVLen int) int {
+func (c *Conn) maxPayloadSizeForWrite(typ recordType) int {
if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData {
return maxPlaintext
}
@@ -799,16 +743,11 @@ func (c *Conn) maxPayloadSizeForWrite(typ recordType, explicitIVLen int) int {
}
// Subtract TLS overheads to get the maximum payload size.
- macSize := 0
- if c.out.mac != nil {
- macSize = c.out.mac.Size()
- }
-
- payloadBytes := tcpMSSEstimate - recordHeaderLen - explicitIVLen
+ payloadBytes := tcpMSSEstimate - recordHeaderLen - c.out.explicitNonceLen()
if c.out.cipher != nil {
switch ciph := c.out.cipher.(type) {
case cipher.Stream:
- payloadBytes -= macSize
+ payloadBytes -= c.out.mac.Size()
case cipher.AEAD:
payloadBytes -= ciph.Overhead()
case cbcMode:
@@ -818,7 +757,7 @@ func (c *Conn) maxPayloadSizeForWrite(typ recordType, explicitIVLen int) int {
payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
// The MAC is appended before padding so affects the
// payload size directly.
- payloadBytes -= macSize
+ payloadBytes -= c.out.mac.Size()
default:
panic("unknown cipher type")
}
@@ -864,63 +803,32 @@ func (c *Conn) flush() (int, error) {
// writeRecordLocked writes a TLS record with the given type and payload to the
// connection and updates the record layer state.
func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
- b := c.out.newBlock()
- defer c.out.freeBlock(b)
-
var n int
for len(data) > 0 {
- explicitIVLen := 0
- explicitIVIsSeq := false
-
- var cbc cbcMode
- if c.out.version >= VersionTLS11 {
- var ok bool
- if cbc, ok = c.out.cipher.(cbcMode); ok {
- explicitIVLen = cbc.BlockSize()
- }
- }
- if explicitIVLen == 0 {
- if c, ok := c.out.cipher.(aead); ok {
- explicitIVLen = c.explicitNonceLen()
-
- // The AES-GCM construction in TLS has an
- // explicit nonce so that the nonce can be
- // random. However, the nonce is only 8 bytes
- // which is too small for a secure, random
- // nonce. Therefore we use the sequence number
- // as the nonce.
- explicitIVIsSeq = explicitIVLen > 0
- }
- }
m := len(data)
- if maxPayload := c.maxPayloadSizeForWrite(typ, explicitIVLen); m > maxPayload {
+ if maxPayload := c.maxPayloadSizeForWrite(typ); m > maxPayload {
m = maxPayload
}
- b.resize(recordHeaderLen + explicitIVLen + m)
- b.data[0] = byte(typ)
+
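+ // c.outBuf is reused across records; sliceForAppend reserves room for
+ // the header, reallocating only when the capacity is insufficient.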
+ _, c.outBuf = sliceForAppend(c.outBuf[:0], recordHeaderLen)
+ c.outBuf[0] = byte(typ)
vers := c.vers
if vers == 0 {
// Some TLS servers fail if the record version is
// greater than TLS 1.0 for the initial ClientHello.
vers = VersionTLS10
}
- b.data[1] = byte(vers >> 8)
- b.data[2] = byte(vers)
- b.data[3] = byte(m >> 8)
- b.data[4] = byte(m)
- if explicitIVLen > 0 {
- explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
- if explicitIVIsSeq {
- copy(explicitIV, c.out.seq[:])
- } else {
- if _, err := io.ReadFull(c.config.rand(), explicitIV); err != nil {
- return n, err
- }
- }
+ c.outBuf[1] = byte(vers >> 8)
+ c.outBuf[2] = byte(vers)
+ c.outBuf[3] = byte(m >> 8)
+ c.outBuf[4] = byte(m)
+
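+ // encrypt appends the sealed payload to the header in c.outBuf; it now
+ // generates any explicit nonce or IV itself, replacing the logic removed above.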
+ var err error
+ c.outBuf, err = c.out.encrypt(c.outBuf, data[:m], c.config.rand())
+ if err != nil {
+ return n, err
}
- copy(b.data[recordHeaderLen+explicitIVLen:], data)
- c.out.encrypt(b, explicitIVLen)
- if _, err := c.write(b.data); err != nil {
+ if _, err := c.write(c.outBuf); err != nil {
return n, err
}
n += m
@@ -1124,14 +1032,14 @@ func (c *Conn) handleRenegotiation() error {
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
-func (c *Conn) Read(b []byte) (n int, err error) {
- if err = c.Handshake(); err != nil {
- return
+func (c *Conn) Read(b []byte) (int, error) {
+ if err := c.Handshake(); err != nil {
+ return 0, err
}
if len(b) == 0 {
// Put this after Handshake, in case people were calling
// Read(nil) for the side effect of the Handshake.
- return
+ return 0, nil
}
c.in.Lock()
@@ -1141,9 +1049,8 @@ func (c *Conn) Read(b []byte) (n int, err error) {
// CBC IV. So this loop ignores a limited number of empty records.
const maxConsecutiveEmptyRecords = 100
for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ {
- for c.input == nil && c.in.err == nil {
+ for c.input.Len() == 0 && c.in.err == nil {
if err := c.readRecord(recordTypeApplicationData); err != nil {
- // Soft error, like EAGAIN
return 0, err
}
if c.hand.Len() > 0 {
@@ -1158,33 +1065,24 @@ func (c *Conn) Read(b []byte) (n int, err error) {
return 0, err
}
- n, err = c.input.Read(b)
- if c.input.off >= len(c.input.data) {
- c.in.freeBlock(c.input)
- c.input = nil
- }
+ n, _ := c.input.Read(b)
- // If a close-notify alert is waiting, read it so that
- // we can return (n, EOF) instead of (n, nil), to signal
- // to the HTTP response reading goroutine that the
- // connection is now closed. This eliminates a race
- // where the HTTP response reading goroutine would
- // otherwise not observe the EOF until its next read,
- // by which time a client goroutine might have already
- // tried to reuse the HTTP connection for a new
- // request.
- // See https://codereview.appspot.com/76400046
- // and https://golang.org/issue/3514
- if ri := c.rawInput; ri != nil &&
- n != 0 && err == nil &&
- c.input == nil && len(ri.data) > 0 && recordType(ri.data[0]) == recordTypeAlert {
- if recErr := c.readRecord(recordTypeApplicationData); recErr != nil {
- err = recErr // will be io.EOF on closeNotify
+ // If a close-notify alert is waiting, read it so that we can return (n,
+ // EOF) instead of (n, nil), to signal to the HTTP response reading
+ // goroutine that the connection is now closed. This eliminates a race
+ // where the HTTP response reading goroutine would otherwise not observe
+ // the EOF until its next read, by which time a client goroutine might
+ // have already tried to reuse the HTTP connection for a new request.
+ // See https://golang.org/cl/76400046 and https://golang.org/issue/3514
+ if n != 0 && c.input.Len() == 0 && c.rawInput.Len() > 0 &&
+ recordType(c.rawInput.Bytes()[0]) == recordTypeAlert {
+ if err := c.readRecord(recordTypeApplicationData); err != nil {
+ return n, err // will be io.EOF on closeNotify
}
}
- if n != 0 || err != nil {
- return n, err
+ if n != 0 {
+ return n, nil
}
}
diff --git a/src/crypto/tls/conn_test.go b/src/crypto/tls/conn_test.go
index 5c7f7ce2bb..f948717038 100644
--- a/src/crypto/tls/conn_test.go
+++ b/src/crypto/tls/conn_test.go
@@ -134,12 +134,13 @@ func TestCertificateSelection(t *testing.T) {
// Run with multiple crypto configs to test the logic for computing TLS record overheads.
func runDynamicRecordSizingTest(t *testing.T, config *Config) {
- clientConn, serverConn := net.Pipe()
+ clientConn, serverConn := localPipe(t)
serverConfig := config.Clone()
serverConfig.DynamicRecordSizingDisabled = false
tlsConn := Server(serverConn, serverConfig)
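+ // handshakeDone is closed by the client goroutine once its handshake has
+ // finished, so the buffered localPipe connection (unlike the synchronous
+ // net.Pipe) can't let the server start writing before the client is ready.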
+ handshakeDone := make(chan struct{})
recordSizesChan := make(chan []int, 1)
go func() {
// This goroutine performs a TLS handshake over clientConn and
@@ -153,6 +154,7 @@ func runDynamicRecordSizingTest(t *testing.T, config *Config) {
t.Errorf("Error from client handshake: %v", err)
return
}
+ close(handshakeDone)
var recordHeader [recordHeaderLen]byte
var record []byte
@@ -192,6 +194,7 @@ func runDynamicRecordSizingTest(t *testing.T, config *Config) {
if err := tlsConn.Handshake(); err != nil {
t.Fatalf("Error from server handshake: %s", err)
}
+ <-handshakeDone
// The server writes these plaintexts in order.
plaintext := bytes.Join([][]byte{
@@ -269,7 +272,7 @@ func (conn *hairpinConn) Close() error {
func TestHairpinInClose(t *testing.T) {
// This tests that the underlying net.Conn can call back into the
// tls.Conn when being closed without deadlocking.
- client, server := net.Pipe()
+ client, server := localPipe(t)
defer server.Close()
defer client.Close()
diff --git a/src/crypto/tls/handshake_client.go b/src/crypto/tls/handshake_client.go
index 9f5731bb99..3ea81b7949 100644
--- a/src/crypto/tls/handshake_client.go
+++ b/src/crypto/tls/handshake_client.go
@@ -850,7 +850,7 @@ func mutualProtocol(protos, preferenceProtos []string) (string, bool) {
// hostnameInSNI converts name into an appropriate hostname for SNI.
// Literal IP addresses and absolute FQDNs are not permitted as SNI values.
-// See https://tools.ietf.org/html/rfc6066#section-3.
+// See RFC 6066, Section 3.
func hostnameInSNI(name string) string {
host := name
if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go
index 1f1c93d102..dcd6914098 100644
--- a/src/crypto/tls/handshake_client_test.go
+++ b/src/crypto/tls/handshake_client_test.go
@@ -179,7 +179,7 @@ func (test *clientTest) connFromCommand() (conn *recordingConn, child *exec.Cmd,
var pemOut bytes.Buffer
pem.Encode(&pemOut, &pem.Block{Type: pemType + " PRIVATE KEY", Bytes: derBytes})
- keyPath := tempFile(string(pemOut.Bytes()))
+ keyPath := tempFile(pemOut.String())
defer os.Remove(keyPath)
var command []string
@@ -293,7 +293,7 @@ func (test *clientTest) run(t *testing.T, write bool) {
}
clientConn = recordingConn
} else {
- clientConn, serverConn = net.Pipe()
+ clientConn, serverConn = localPipe(t)
}
config := test.config
@@ -682,7 +682,7 @@ func TestClientResumption(t *testing.T) {
}
testResumeState := func(test string, didResume bool) {
- _, hs, err := testHandshake(clientConfig, serverConfig)
+ _, hs, err := testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("%s: handshake failed: %s", test, err)
}
@@ -800,7 +800,7 @@ func TestKeyLog(t *testing.T) {
serverConfig := testConfig.Clone()
serverConfig.KeyLogWriter = &serverBuf
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan bool)
go func() {
@@ -838,8 +838,8 @@ func TestKeyLog(t *testing.T) {
}
}
- checkKeylogLine("client", string(clientBuf.Bytes()))
- checkKeylogLine("server", string(serverBuf.Bytes()))
+ checkKeylogLine("client", clientBuf.String())
+ checkKeylogLine("server", serverBuf.String())
}
func TestHandshakeClientALPNMatch(t *testing.T) {
@@ -1021,7 +1021,7 @@ var hostnameInSNITests = []struct {
func TestHostnameInSNI(t *testing.T) {
for _, tt := range hostnameInSNITests {
- c, s := net.Pipe()
+ c, s := localPipe(t)
go func(host string) {
Client(c, &Config{ServerName: host, InsecureSkipVerify: true}).Handshake()
@@ -1059,7 +1059,7 @@ func TestServerSelectingUnconfiguredCipherSuite(t *testing.T) {
// This checks that the server can't select a cipher suite that the
// client didn't offer. See #13174.
- c, s := net.Pipe()
+ c, s := localPipe(t)
errChan := make(chan error, 1)
go func() {
@@ -1228,7 +1228,7 @@ func TestVerifyPeerCertificate(t *testing.T) {
}
for i, test := range tests {
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan error)
var clientCalled, serverCalled bool
@@ -1287,7 +1287,7 @@ func (b *brokenConn) Write(data []byte) (int, error) {
func TestFailedWrite(t *testing.T) {
// Test that a write error during the handshake is returned.
for _, breakAfter := range []int{0, 1} {
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan bool)
go func() {
@@ -1321,7 +1321,7 @@ func (wcc *writeCountingConn) Write(data []byte) (int, error) {
}
func TestBuffering(t *testing.T) {
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan bool)
clientWCC := &writeCountingConn{Conn: c}
@@ -1350,7 +1350,7 @@ func TestBuffering(t *testing.T) {
}
func TestAlertFlushing(t *testing.T) {
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan bool)
clientWCC := &writeCountingConn{Conn: c}
@@ -1399,7 +1399,7 @@ func TestHandshakeRace(t *testing.T) {
// order to provide some evidence that there are no races or deadlocks
// in the handshake locking.
for i := 0; i < 32; i++ {
- c, s := net.Pipe()
+ c, s := localPipe(t)
go func() {
server := Server(s, testConfig)
@@ -1430,7 +1430,7 @@ func TestHandshakeRace(t *testing.T) {
go func() {
<-startRead
var reply [1]byte
- if n, err := client.Read(reply[:]); err != nil || n != 1 {
+ if _, err := io.ReadFull(client, reply[:]); err != nil {
panic(err)
}
c.Close()
@@ -1559,7 +1559,7 @@ func TestGetClientCertificate(t *testing.T) {
err error
}
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan serverResult)
go func() {
@@ -1637,7 +1637,7 @@ RwBA9Xk1KBNF
}
func TestCloseClientConnectionOnIdleServer(t *testing.T) {
- clientConn, serverConn := net.Pipe()
+ clientConn, serverConn := localPipe(t)
client := Client(clientConn, testConfig.Clone())
go func() {
var b [1]byte
@@ -1647,8 +1647,8 @@ func TestCloseClientConnectionOnIdleServer(t *testing.T) {
client.SetWriteDeadline(time.Now().Add(time.Second))
err := client.Handshake()
if err != nil {
- if !strings.Contains(err.Error(), "read/write on closed pipe") {
- t.Errorf("Error expected containing 'read/write on closed pipe' but got '%s'", err.Error())
+ if err, ok := err.(net.Error); ok && err.Timeout() {
+ t.Errorf("Expected a closed network connection error but got '%s'", err.Error())
}
} else {
t.Errorf("Error expected, but no error returned")
diff --git a/src/crypto/tls/handshake_messages.go b/src/crypto/tls/handshake_messages.go
index a5bf10efb8..27004b2d69 100644
--- a/src/crypto/tls/handshake_messages.go
+++ b/src/crypto/tls/handshake_messages.go
@@ -155,7 +155,7 @@ func (m *clientHelloMsg) marshal() []byte {
z[3] = byte(l)
z = z[4:]
- // RFC 3546, section 3.1
+ // RFC 3546, Section 3.1
//
// struct {
// NameType name_type;
@@ -182,7 +182,7 @@ func (m *clientHelloMsg) marshal() []byte {
z = z[l:]
}
if m.ocspStapling {
- // RFC 4366, section 3.6
+ // RFC 4366, Section 3.6
z[0] = byte(extensionStatusRequest >> 8)
z[1] = byte(extensionStatusRequest)
z[2] = 0
@@ -192,7 +192,7 @@ func (m *clientHelloMsg) marshal() []byte {
z = z[9:]
}
if len(m.supportedCurves) > 0 {
- // https://tools.ietf.org/html/rfc4492#section-5.5.1
+ // RFC 4492, Section 5.5.1
z[0] = byte(extensionSupportedCurves >> 8)
z[1] = byte(extensionSupportedCurves)
l := 2 + 2*len(m.supportedCurves)
@@ -209,7 +209,7 @@ func (m *clientHelloMsg) marshal() []byte {
}
}
if len(m.supportedPoints) > 0 {
- // https://tools.ietf.org/html/rfc4492#section-5.5.2
+ // RFC 4492, Section 5.5.2
z[0] = byte(extensionSupportedPoints >> 8)
z[1] = byte(extensionSupportedPoints)
l := 1 + len(m.supportedPoints)
@@ -224,7 +224,7 @@ func (m *clientHelloMsg) marshal() []byte {
}
}
if m.ticketSupported {
- // https://tools.ietf.org/html/rfc5077#section-3.2
+ // RFC 5077, Section 3.2
z[0] = byte(extensionSessionTicket >> 8)
z[1] = byte(extensionSessionTicket)
l := len(m.sessionTicket)
@@ -235,7 +235,7 @@ func (m *clientHelloMsg) marshal() []byte {
z = z[len(m.sessionTicket):]
}
if len(m.supportedSignatureAlgorithms) > 0 {
- // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
+ // RFC 5246, Section 7.4.1.4.1
z[0] = byte(extensionSignatureAlgorithms >> 8)
z[1] = byte(extensionSignatureAlgorithms)
l := 2 + 2*len(m.supportedSignatureAlgorithms)
@@ -285,7 +285,7 @@ func (m *clientHelloMsg) marshal() []byte {
lengths[1] = byte(stringsLength)
}
if m.scts {
- // https://tools.ietf.org/html/rfc6962#section-3.3.1
+ // RFC 6962, Section 3.3.1
z[0] = byte(extensionSCT >> 8)
z[1] = byte(extensionSCT)
// zero uint16 for the zero-length extension_data
@@ -396,9 +396,8 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
}
if nameType == 0 {
m.serverName = string(d[:nameLen])
- // An SNI value may not include a
- // trailing dot. See
- // https://tools.ietf.org/html/rfc6066#section-3.
+ // An SNI value may not include a trailing dot.
+ // See RFC 6066, Section 3.
if strings.HasSuffix(m.serverName, ".") {
return false
}
@@ -414,7 +413,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
case extensionStatusRequest:
m.ocspStapling = length > 0 && data[0] == statusTypeOCSP
case extensionSupportedCurves:
- // https://tools.ietf.org/html/rfc4492#section-5.5.1
+ // RFC 4492, Section 5.5.1
if length < 2 {
return false
}
@@ -430,7 +429,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
d = d[2:]
}
case extensionSupportedPoints:
- // https://tools.ietf.org/html/rfc4492#section-5.5.2
+ // RFC 4492, Section 5.5.2
if length < 1 {
return false
}
@@ -441,11 +440,11 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
m.supportedPoints = make([]uint8, l)
copy(m.supportedPoints, data[1:])
case extensionSessionTicket:
- // https://tools.ietf.org/html/rfc5077#section-3.2
+ // RFC 5077, Section 3.2
m.ticketSupported = true
m.sessionTicket = data[:length]
case extensionSignatureAlgorithms:
- // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
+ // RFC 5246, Section 7.4.1.4.1
if length < 2 || length&1 != 0 {
return false
}
@@ -1224,7 +1223,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
return m.raw
}
- // See https://tools.ietf.org/html/rfc4346#section-7.4.4
+ // See RFC 4346, Section 7.4.4.
length := 1 + len(m.certificateTypes) + 2
casLength := 0
for _, ca := range m.certificateAuthorities {
@@ -1374,7 +1373,7 @@ func (m *certificateVerifyMsg) marshal() (x []byte) {
return m.raw
}
- // See https://tools.ietf.org/html/rfc4346#section-7.4.8
+ // See RFC 4346, Section 7.4.8.
siglength := len(m.signature)
length := 2 + siglength
if m.hasSignatureAndHash {
@@ -1452,7 +1451,7 @@ func (m *newSessionTicketMsg) marshal() (x []byte) {
return m.raw
}
- // See https://tools.ietf.org/html/rfc5077#section-3.3
+ // See RFC 5077, Section 3.3.
ticketLen := len(m.ticket)
length := 2 + 4 + ticketLen
x = make([]byte, 4+length)
diff --git a/src/crypto/tls/handshake_messages_test.go b/src/crypto/tls/handshake_messages_test.go
index 1bf12c9ff2..e24089b4ba 100644
--- a/src/crypto/tls/handshake_messages_test.go
+++ b/src/crypto/tls/handshake_messages_test.go
@@ -188,7 +188,7 @@ func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value {
numSCTs := rand.Intn(4)
m.scts = make([][]byte, numSCTs)
for i := range m.scts {
- m.scts[i] = randomBytes(rand.Intn(500), rand)
+ m.scts[i] = randomBytes(rand.Intn(500)+1, rand)
}
}
@@ -271,8 +271,7 @@ func (*sessionState) Generate(rand *rand.Rand, size int) reflect.Value {
}
func TestRejectEmptySCTList(t *testing.T) {
- // https://tools.ietf.org/html/rfc6962#section-3.3.1 specifies that
- // empty SCT lists are invalid.
+ // RFC 6962, Section 3.3.1 specifies that empty SCT lists are invalid.
var random [32]byte
sct := []byte{0x42, 0x42, 0x42, 0x42}
diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go
index f2ad2262f0..edd48de1da 100644
--- a/src/crypto/tls/handshake_server.go
+++ b/src/crypto/tls/handshake_server.go
@@ -49,7 +49,7 @@ func (c *Conn) serverHandshake() error {
return err
}
- // For an overview of TLS handshaking, see https://tools.ietf.org/html/rfc5246#section-7.3
+ // For an overview of TLS handshaking, see RFC 5246, Section 7.3.
c.buffering = true
if isResume {
// The client has included a session ticket and so we do an abbreviated handshake.
@@ -268,7 +268,7 @@ Curves:
return false, errors.New("tls: no cipher suite supported by both client and server")
}
- // See https://tools.ietf.org/html/rfc7507.
+ // See RFC 7507.
for _, id := range hs.clientHello.cipherSuites {
if id == TLS_FALLBACK_SCSV {
// The client is doing a fallback connection.
diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go
index c366f47b17..44c67ed063 100644
--- a/src/crypto/tls/handshake_server_test.go
+++ b/src/crypto/tls/handshake_server_test.go
@@ -70,10 +70,7 @@ func testClientHello(t *testing.T, serverConfig *Config, m handshakeMessage) {
}
func testClientHelloFailure(t *testing.T, serverConfig *Config, m handshakeMessage, expectedSubStr string) {
- // Create in-memory network connection,
- // send message to server. Should return
- // expected error.
- c, s := net.Pipe()
+ c, s := localPipe(t)
go func() {
cli := Client(c, testConfig)
if ch, ok := m.(*clientHelloMsg); ok {
@@ -201,25 +198,26 @@ func TestRenegotiationExtension(t *testing.T) {
cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA},
}
- var buf []byte
- c, s := net.Pipe()
+ bufChan := make(chan []byte)
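+ // The client's read is handed back over bufChan rather than through a
+ // shared variable, avoiding a data race now that the pipe is asynchronous.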
+ c, s := localPipe(t)
go func() {
cli := Client(c, testConfig)
cli.vers = clientHello.vers
cli.writeRecord(recordTypeHandshake, clientHello.marshal())
- buf = make([]byte, 1024)
+ buf := make([]byte, 1024)
n, err := c.Read(buf)
if err != nil {
t.Errorf("Server read returned error: %s", err)
return
}
- buf = buf[:n]
c.Close()
+ bufChan <- buf[:n]
}()
Server(s, testConfig).Handshake()
+ buf := <-bufChan
if len(buf) < 5+4 {
t.Fatalf("Server returned short message of length %d", len(buf))
@@ -262,22 +260,27 @@ func TestTLS12OnlyCipherSuites(t *testing.T) {
supportedPoints: []uint8{pointFormatUncompressed},
}
- c, s := net.Pipe()
- var reply interface{}
- var clientErr error
+ c, s := localPipe(t)
+ replyChan := make(chan interface{})
go func() {
cli := Client(c, testConfig)
cli.vers = clientHello.vers
cli.writeRecord(recordTypeHandshake, clientHello.marshal())
- reply, clientErr = cli.readHandshake()
+ reply, err := cli.readHandshake()
c.Close()
+ if err != nil {
+ replyChan <- err
+ } else {
+ replyChan <- reply
+ }
}()
config := testConfig.Clone()
config.CipherSuites = clientHello.cipherSuites
Server(s, config).Handshake()
s.Close()
- if clientErr != nil {
- t.Fatal(clientErr)
+ reply := <-replyChan
+ if err, ok := reply.(error); ok {
+ t.Fatal(err)
}
serverHello, ok := reply.(*serverHelloMsg)
if !ok {
@@ -289,7 +292,7 @@ func TestTLS12OnlyCipherSuites(t *testing.T) {
}
func TestAlertForwarding(t *testing.T) {
- c, s := net.Pipe()
+ c, s := localPipe(t)
go func() {
Client(c, testConfig).sendAlert(alertUnknownCA)
c.Close()
@@ -303,7 +306,7 @@ func TestAlertForwarding(t *testing.T) {
}
func TestClose(t *testing.T) {
- c, s := net.Pipe()
+ c, s := localPipe(t)
go c.Close()
err := Server(s, testConfig).Handshake()
@@ -313,8 +316,8 @@ func TestClose(t *testing.T) {
}
}
-func testHandshake(clientConfig, serverConfig *Config) (serverState, clientState ConnectionState, err error) {
- c, s := net.Pipe()
+func testHandshake(t *testing.T, clientConfig, serverConfig *Config) (serverState, clientState ConnectionState, err error) {
+ c, s := localPipe(t)
done := make(chan bool)
go func() {
cli := Client(c, clientConfig)
@@ -341,7 +344,7 @@ func TestVersion(t *testing.T) {
clientConfig := &Config{
InsecureSkipVerify: true,
}
- state, _, err := testHandshake(clientConfig, serverConfig)
+ state, _, err := testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -360,7 +363,7 @@ func TestCipherSuitePreference(t *testing.T) {
CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_RC4_128_SHA},
InsecureSkipVerify: true,
}
- state, _, err := testHandshake(clientConfig, serverConfig)
+ state, _, err := testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -370,7 +373,7 @@ func TestCipherSuitePreference(t *testing.T) {
}
serverConfig.PreferServerCipherSuites = true
- state, _, err = testHandshake(clientConfig, serverConfig)
+ state, _, err = testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -391,7 +394,7 @@ func TestSCTHandshake(t *testing.T) {
clientConfig := &Config{
InsecureSkipVerify: true,
}
- _, state, err := testHandshake(clientConfig, serverConfig)
+ _, state, err := testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -420,13 +423,13 @@ func TestCrossVersionResume(t *testing.T) {
// Establish a session at TLS 1.1.
clientConfig.MaxVersion = VersionTLS11
- _, _, err := testHandshake(clientConfig, serverConfig)
+ _, _, err := testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
// The client session cache now contains a TLS 1.1 session.
- state, _, err := testHandshake(clientConfig, serverConfig)
+ state, _, err := testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -436,7 +439,7 @@ func TestCrossVersionResume(t *testing.T) {
// Test that the server will decline to resume at a lower version.
clientConfig.MaxVersion = VersionTLS10
- state, _, err = testHandshake(clientConfig, serverConfig)
+ state, _, err = testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -445,7 +448,7 @@ func TestCrossVersionResume(t *testing.T) {
}
// The client session cache now contains a TLS 1.0 session.
- state, _, err = testHandshake(clientConfig, serverConfig)
+ state, _, err = testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -455,7 +458,7 @@ func TestCrossVersionResume(t *testing.T) {
// Test that the server will decline to resume at a higher version.
clientConfig.MaxVersion = VersionTLS11
- state, _, err = testHandshake(clientConfig, serverConfig)
+ state, _, err = testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
@@ -579,7 +582,7 @@ func (test *serverTest) run(t *testing.T, write bool) {
}
serverConn = recordingConn
} else {
- clientConn, serverConn = net.Pipe()
+ clientConn, serverConn = localPipe(t)
}
config := test.config
if config == nil {
@@ -832,7 +835,7 @@ func TestHandshakeServerSNIGetCertificate(t *testing.T) {
nameToCert := config.NameToCertificate
config.NameToCertificate = nil
config.GetCertificate = func(clientHello *ClientHelloInfo) (*Certificate, error) {
- cert, _ := nameToCert[clientHello.ServerName]
+ cert := nameToCert[clientHello.ServerName]
return cert, nil
}
test := &serverTest{
@@ -1025,7 +1028,7 @@ func benchmarkHandshakeServer(b *testing.B, cipherSuite uint16, curve CurveID, c
config.Certificates[0].PrivateKey = key
config.BuildNameToCertificate()
- clientConn, serverConn := net.Pipe()
+ clientConn, serverConn := localPipe(b)
serverConn = &recordingConn{Conn: serverConn}
go func() {
client := Client(clientConn, testConfig)
@@ -1039,7 +1042,7 @@ func benchmarkHandshakeServer(b *testing.B, cipherSuite uint16, curve CurveID, c
flows := serverConn.(*recordingConn).flows
feeder := make(chan struct{})
- clientConn, serverConn = net.Pipe()
+ clientConn, serverConn = localPipe(b)
go func() {
for range feeder {
@@ -1051,10 +1054,10 @@ func benchmarkHandshakeServer(b *testing.B, cipherSuite uint16, curve CurveID, c
ff := make([]byte, len(f))
n, err := io.ReadFull(clientConn, ff)
if err != nil {
- b.Fatalf("#%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", i+1, err, n, len(ff), ff[:n], f)
+ b.Errorf("#%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", i+1, err, n, len(ff), ff[:n], f)
}
if !bytes.Equal(f, ff) {
- b.Fatalf("#%d: mismatch on read: got:%x want:%x", i+1, ff, f)
+ b.Errorf("#%d: mismatch on read: got:%x want:%x", i+1, ff, f)
}
}
}
@@ -1216,7 +1219,7 @@ func TestSNIGivenOnFailure(t *testing.T) {
// Erase the server's cipher suites to ensure the handshake fails.
serverConfig.CipherSuites = nil
- c, s := net.Pipe()
+ c, s := localPipe(t)
go func() {
cli := Client(c, testConfig)
cli.vers = clientHello.vers
@@ -1346,7 +1349,7 @@ func TestGetConfigForClient(t *testing.T) {
configReturned = config
return config, err
}
- c, s := net.Pipe()
+ c, s := localPipe(t)
done := make(chan error)
go func() {
@@ -1423,7 +1426,7 @@ var testECDSAPrivateKey = &ecdsa.PrivateKey{
var testP256PrivateKey, _ = x509.ParseECPrivateKey(fromHex("30770201010420012f3b52bc54c36ba3577ad45034e2e8efe1e6999851284cb848725cfe029991a00a06082a8648ce3d030107a14403420004c02c61c9b16283bbcc14956d886d79b358aa614596975f78cece787146abf74c2d5dc578c0992b4f3c631373479ebf3892efe53d21c4f4f1cc9a11c3536b7f75"))
func TestCloseServerConnectionOnIdleClient(t *testing.T) {
- clientConn, serverConn := net.Pipe()
+ clientConn, serverConn := localPipe(t)
server := Server(serverConn, testConfig.Clone())
go func() {
clientConn.Write([]byte{'0'})
@@ -1432,8 +1435,8 @@ func TestCloseServerConnectionOnIdleClient(t *testing.T) {
server.SetReadDeadline(time.Now().Add(time.Second))
err := server.Handshake()
if err != nil {
- if !strings.Contains(err.Error(), "read/write on closed pipe") {
- t.Errorf("Error expected containing 'read/write on closed pipe' but got '%s'", err.Error())
+ if err, ok := err.(net.Error); ok && err.Timeout() {
+ t.Errorf("Expected a closed network connection error but got '%s'", err.Error())
}
} else {
t.Errorf("Error expected, but no error returned")
diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go
index 4b3fa238f4..18d4624543 100644
--- a/src/crypto/tls/handshake_test.go
+++ b/src/crypto/tls/handshake_test.go
@@ -13,6 +13,7 @@ import (
"io"
"io/ioutil"
"net"
+ "os"
"os/exec"
"strconv"
"strings"
@@ -224,3 +225,45 @@ func tempFile(contents string) string {
file.Close()
return path
}
+
+// localListener is set up by TestMain and used by localPipe to create Conn
+// pairs like net.Pipe, but connected by an actual buffered TCP connection.
+var localListener struct {
+ sync.Mutex
+ net.Listener
+}
+
+func localPipe(t testing.TB) (net.Conn, net.Conn) {
+ localListener.Lock()
+ defer localListener.Unlock()
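+ // The lock pairs each Dial with its Accept, so concurrent callers cannot
+ // receive each other's connections.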
+ c := make(chan net.Conn)
+ go func() {
+ conn, err := localListener.Accept()
+ if err != nil {
+ t.Errorf("Failed to accept local connection: %v", err)
+ }
+ c <- conn
+ }()
+ addr := localListener.Addr()
+ c1, err := net.Dial(addr.Network(), addr.String())
+ if err != nil {
+ t.Fatalf("Failed to dial local connection: %v", err)
+ }
+ c2 := <-c
+ return c1, c2
+}
+
+func TestMain(m *testing.M) {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
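+ // The IPv4 loopback may be unavailable (e.g. on IPv6-only hosts).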
+ l, err = net.Listen("tcp6", "[::1]:0")
+ }
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to open local listener: %v\n", err)
+ os.Exit(1)
+ }
+ localListener.Listener = l
+ exitCode := m.Run()
+ localListener.Close()
+ os.Exit(exitCode)
+}
diff --git a/src/crypto/tls/key_agreement.go b/src/crypto/tls/key_agreement.go
index 0754d1b389..6f62d1c2a2 100644
--- a/src/crypto/tls/key_agreement.go
+++ b/src/crypto/tls/key_agreement.go
@@ -12,10 +12,9 @@ import (
"crypto/sha1"
"crypto/x509"
"errors"
+ "golang_org/x/crypto/curve25519"
"io"
"math/big"
-
- "golang_org/x/crypto/curve25519"
)
var errClientKeyExchange = errors.New("tls: invalid ClientKeyExchange message")
@@ -200,7 +199,7 @@ NextCandidate:
ecdhePublic = elliptic.Marshal(curve, x, y)
}
- // https://tools.ietf.org/html/rfc4492#section-5.4
+ // See RFC 4492, Section 5.4.
serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic))
serverECDHParams[0] = 3 // named curve
serverECDHParams[1] = byte(ka.curveid >> 8)
diff --git a/src/crypto/tls/prf.go b/src/crypto/tls/prf.go
index a8cf21da15..a31a50d14f 100644
--- a/src/crypto/tls/prf.go
+++ b/src/crypto/tls/prf.go
@@ -16,14 +16,14 @@ import (
"hash"
)
-// Split a premaster secret in two as specified in RFC 4346, section 5.
+// Split a premaster secret in two as specified in RFC 4346, Section 5.
func splitPreMasterSecret(secret []byte) (s1, s2 []byte) {
s1 = secret[0 : (len(secret)+1)/2]
s2 = secret[len(secret)/2:]
return
}
-// pHash implements the P_hash function, as defined in RFC 4346, section 5.
+// pHash implements the P_hash function, as defined in RFC 4346, Section 5.
func pHash(result, secret, seed []byte, hash func() hash.Hash) {
h := hmac.New(hash, secret)
h.Write(seed)
@@ -44,7 +44,7 @@ func pHash(result, secret, seed []byte, hash func() hash.Hash) {
}
}
-// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5.
+// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, Section 5.
func prf10(result, secret, label, seed []byte) {
hashSHA1 := sha1.New
hashMD5 := md5.New
@@ -63,7 +63,7 @@ func prf10(result, secret, label, seed []byte) {
}
}
-// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, section 5.
+// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, Section 5.
func prf12(hashFunc func() hash.Hash) func(result, secret, label, seed []byte) {
return func(result, secret, label, seed []byte) {
labelAndSeed := make([]byte, len(label)+len(seed))
@@ -140,7 +140,7 @@ func prfForVersion(version uint16, suite *cipherSuite) func(result, secret, labe
}
// masterFromPreMasterSecret generates the master secret from the pre-master
-// secret. See https://tools.ietf.org/html/rfc5246#section-8.1
+// secret. See RFC 5246, Section 8.1.
func masterFromPreMasterSecret(version uint16, suite *cipherSuite, preMasterSecret, clientRandom, serverRandom []byte) []byte {
seed := make([]byte, 0, len(clientRandom)+len(serverRandom))
seed = append(seed, clientRandom...)
@@ -153,7 +153,7 @@ func masterFromPreMasterSecret(version uint16, suite *cipherSuite, preMasterSecr
// keysFromMasterSecret generates the connection keys from the master
// secret, given the lengths of the MAC key, cipher key and IV, as defined in
-// RFC 2246, section 6.3.
+// RFC 2246, Section 6.3.
func keysFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) {
seed := make([]byte, 0, len(serverRandom)+len(clientRandom))
seed = append(seed, serverRandom...)
@@ -353,8 +353,7 @@ func noExportedKeyingMaterial(label string, context []byte, length int) ([]byte,
return nil, errors.New("crypto/tls: ExportKeyingMaterial is unavailable when renegotiation is enabled")
}
-// ekmFromMasterSecret generates exported keying material as defined in
-// https://tools.ietf.org/html/rfc5705.
+// ekmFromMasterSecret generates exported keying material as defined in RFC 5705.
func ekmFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte) func(string, []byte, int) ([]byte, error) {
return func(label string, context []byte, length int) ([]byte, error) {
switch label {
diff --git a/src/crypto/tls/tls.go b/src/crypto/tls/tls.go
index 8fd4294315..51932882c0 100644
--- a/src/crypto/tls/tls.go
+++ b/src/crypto/tls/tls.go
@@ -11,6 +11,7 @@ package tls
// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
import (
+ "bytes"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
@@ -29,7 +30,10 @@ import (
// The configuration config must be non-nil and must include
// at least one certificate or else set GetCertificate.
func Server(conn net.Conn, config *Config) *Conn {
- return &Conn{conn: conn, config: config}
+ return &Conn{
+ conn: conn, config: config,
+ input: *bytes.NewReader(nil), // Issue 28269
+ }
}
// Client returns a new TLS client side connection
@@ -37,7 +41,10 @@ func Server(conn net.Conn, config *Config) *Conn {
// The config cannot be nil: users must set either ServerName or
// InsecureSkipVerify in the config.
func Client(conn net.Conn, config *Config) *Conn {
- return &Conn{conn: conn, config: config, isClient: true}
+ return &Conn{
+ conn: conn, config: config, isClient: true,
+ input: *bytes.NewReader(nil), // Issue 28269
+ }
}
// A listener implements a network listener (net.Listener) for TLS connections.
diff --git a/src/crypto/x509/pem_decrypt.go b/src/crypto/x509/pem_decrypt.go
index 0388d63e14..93d1e4a922 100644
--- a/src/crypto/x509/pem_decrypt.go
+++ b/src/crypto/x509/pem_decrypt.go
@@ -203,7 +203,7 @@ func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, al
// the data separately, but it doesn't seem worth the additional
// code.
copy(encrypted, data)
- // See RFC 1423, section 1.1
+ // See RFC 1423, Section 1.1.
for i := 0; i < pad; i++ {
encrypted = append(encrypted, byte(pad))
}
diff --git a/src/crypto/x509/pkix/pkix.go b/src/crypto/x509/pkix/pkix.go
index 3cc4d587e3..59c3b15c83 100644
--- a/src/crypto/x509/pkix/pkix.go
+++ b/src/crypto/x509/pkix/pkix.go
@@ -95,7 +95,7 @@ func (r RDNSequence) String() string {
type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
-// https://tools.ietf.org/html/rfc5280#section-4.1.2.4
+// RFC 5280, Section 4.1.2.4.
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
diff --git a/src/crypto/x509/root_aix.go b/src/crypto/x509/root_aix.go
new file mode 100644
index 0000000000..6d427739a4
--- /dev/null
+++ b/src/crypto/x509/root_aix.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/var/ssl/certs/ca-bundle.crt",
+}
diff --git a/src/crypto/x509/root_darwin.go b/src/crypto/x509/root_darwin.go
index 9d7b3a6ffb..ae69a2fadd 100644
--- a/src/crypto/x509/root_darwin.go
+++ b/src/crypto/x509/root_darwin.go
@@ -16,7 +16,6 @@ import (
"io/ioutil"
"os"
"os/exec"
- "os/user"
"path/filepath"
"strings"
"sync"
@@ -67,17 +66,17 @@ func execSecurityRoots() (*CertPool, error) {
"/Library/Keychains/System.keychain",
}
- u, err := user.Current()
- if err != nil {
+ home := os.UserHomeDir()
+ if home == "" {
if debugExecDarwinRoots {
- println(fmt.Sprintf("crypto/x509: get current user: %v", err))
+ println("crypto/x509: can't get user home directory")
}
} else {
args = append(args,
- filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"),
+ filepath.Join(home, "/Library/Keychains/login.keychain"),
// Fresh installs of Sierra use a slightly different path for the login keychain
- filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain-db"),
+ filepath.Join(home, "/Library/Keychains/login.keychain-db"),
)
}
diff --git a/src/crypto/x509/root_unix.go b/src/crypto/x509/root_unix.go
index 8e7036234d..48de50b4ea 100644
--- a/src/crypto/x509/root_unix.go
+++ b/src/crypto/x509/root_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package x509
@@ -19,6 +19,7 @@ var certDirectories = []string{
"/usr/local/share/certs", // FreeBSD
"/etc/pki/tls/certs", // Fedora/RHEL
"/etc/openssl/certs", // NetBSD
+ "/var/ssl/certs", // AIX
}
const (
diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go
index b545f2b2dc..6d3e2b3f66 100644
--- a/src/crypto/x509/verify.go
+++ b/src/crypto/x509/verify.go
@@ -227,10 +227,9 @@ type rfc2821Mailbox struct {
}
// parseRFC2821Mailbox parses an email address into local and domain parts,
-// based on the ABNF for a “Mailbox” from RFC 2821. According to
-// https://tools.ietf.org/html/rfc5280#section-4.2.1.6 that's correct for an
-// rfc822Name from a certificate: “The format of an rfc822Name is a "Mailbox"
-// as defined in https://tools.ietf.org/html/rfc2821#section-4.1.2”.
+// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280,
+// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The
+// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”.
func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
if len(in) == 0 {
return mailbox, false
@@ -247,9 +246,8 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
// quoted-pair = ("\" text) / obs-qp
// text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text
//
- // (Names beginning with “obs-” are the obsolete syntax from
- // https://tools.ietf.org/html/rfc2822#section-4. Since it has
- // been 16 years, we no longer accept that.)
+ // (Names beginning with “obs-” are the obsolete syntax from RFC 2822,
+ // Section 4. Since it has been 16 years, we no longer accept that.)
in = in[1:]
QuotedString:
for {
@@ -303,7 +301,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
// Atom ("." Atom)*
NextChar:
for len(in) > 0 {
- // atext from https://tools.ietf.org/html/rfc2822#section-3.2.4
+ // atext from RFC 2822, Section 3.2.4
c := in[0]
switch {
@@ -339,7 +337,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
return mailbox, false
}
- // https://tools.ietf.org/html/rfc3696#section-3
+ // From RFC 3696, Section 3:
// “period (".") may also appear, but may not be used to start
// or end the local part, nor may two or more consecutive
// periods appear.”
@@ -420,7 +418,7 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro
}
func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
- // https://tools.ietf.org/html/rfc5280#section-4.2.1.10
+ // From RFC 5280, Section 4.2.1.10:
// “a uniformResourceIdentifier that does not include an authority
// component with a host name specified as a fully qualified domain
// name (e.g., if the URI either does not include an authority
@@ -999,7 +997,7 @@ func (c *Certificate) VerifyHostname(h string) error {
}
if ip := net.ParseIP(candidateIP); ip != nil {
// We only match IP addresses against IP SANs.
- // https://tools.ietf.org/html/rfc6125#appendix-B.2
+ // See RFC 6125, Appendix B.2.
for _, candidate := range c.IPAddresses {
if ip.Equal(candidate) {
return nil
diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go
index 2e72471de2..7e8f675886 100644
--- a/src/crypto/x509/x509.go
+++ b/src/crypto/x509/x509.go
@@ -24,6 +24,8 @@ import (
"encoding/pem"
"errors"
"fmt"
+ "golang_org/x/crypto/cryptobyte"
+ cryptobyte_asn1 "golang_org/x/crypto/cryptobyte/asn1"
"io"
"math/big"
"net"
@@ -32,9 +34,6 @@ import (
"strings"
"time"
"unicode/utf8"
-
- "golang_org/x/crypto/cryptobyte"
- cryptobyte_asn1 "golang_org/x/crypto/cryptobyte/asn1"
)
// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
@@ -78,7 +77,7 @@ func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorith
}
publicKeyAlgorithm.Algorithm = oidPublicKeyRSA
// This is a NULL parameters value which is required by
- // https://tools.ietf.org/html/rfc3279#section-2.3.1.
+ // RFC 3279, Section 2.3.1.
publicKeyAlgorithm.Parameters = asn1.NullRawValue
case *ecdsa.PublicKey:
publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
@@ -334,7 +333,7 @@ var signatureAlgorithmDetails = []struct {
}
// pssParameters reflects the parameters in an AlgorithmIdentifier that
-// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3
+// specifies RSA PSS. See RFC 3447, Appendix A.2.3.
type pssParameters struct {
// The following three fields are not marked as
// optional because the default values specify SHA-1,
@@ -413,13 +412,11 @@ func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm
return UnknownSignatureAlgorithm
}
- // PSS is greatly overburdened with options. This code forces
- // them into three buckets by requiring that the MGF1 hash
- // function always match the message hash function (as
- // recommended in
- // https://tools.ietf.org/html/rfc3447#section-8.1), that the
- // salt length matches the hash length, and that the trailer
- // field has the default value.
+ // PSS is greatly overburdened with options. This code forces them into
+ // three buckets by requiring that the MGF1 hash function always match the
+ // message hash function (as recommended in RFC 3447, Section 8.1), that the
+ // salt length matches the hash length, and that the trailer field has the
+ // default value.
if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
!params.MGF.Algorithm.Equal(oidMGF1) ||
!mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
@@ -987,8 +984,8 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
asn1Data := keyData.PublicKey.RightAlign()
switch algo {
case RSA:
- // RSA public keys must have a NULL in the parameters
- // (https://tools.ietf.org/html/rfc3279#section-2.3.1).
+ // RSA public keys must have a NULL in the parameters.
+ // See RFC 3279, Section 2.3.1.
if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
return nil, errors.New("x509: RSA key missing NULL parameters")
}
@@ -1203,7 +1200,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
}
if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 {
- // https://tools.ietf.org/html/rfc5280#section-4.2.1.10:
+ // From RFC 5280, Section 4.2.1.10:
// “either the permittedSubtrees field
// or the excludedSubtrees MUST be
// present”
@@ -1798,7 +1795,7 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
!oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
ret[n].Id = oidExtensionSubjectAltName
- // https://tools.ietf.org/html/rfc5280#section-4.2.1.6
+ // From RFC 5280, Section 4.2.1.6:
// “If the subject field contains an empty sequence ... then
// subjectAltName extension ... is marked as critical”
ret[n].Critical = subjectIsEmpty
@@ -2357,8 +2354,7 @@ func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndVa
// parseCSRExtensions parses the attributes from a CSR and extracts any
// requested extensions.
func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) {
- // pkcs10Attribute reflects the Attribute structure from section 4.1 of
- // https://tools.ietf.org/html/rfc2986.
+ // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1.
type pkcs10Attribute struct {
Id asn1.ObjectIdentifier
Values []asn1.RawValue `asn1:"set"`
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index 7c8d46b100..1ffe252ee3 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -567,7 +567,6 @@ type finalCloser interface {
// addDep notes that x now depends on dep, and x's finalClose won't be
// called until all of x's dependencies are removed with removeDep.
func (db *DB) addDep(x finalCloser, dep interface{}) {
- //println(fmt.Sprintf("addDep(%T %p, %T %p)", x, x, dep, dep))
db.mu.Lock()
defer db.mu.Unlock()
db.addDepLocked(x, dep)
@@ -597,7 +596,6 @@ func (db *DB) removeDep(x finalCloser, dep interface{}) error {
}
func (db *DB) removeDepLocked(x finalCloser, dep interface{}) func() error {
- //println(fmt.Sprintf("removeDep(%T %p, %T %p)", x, x, dep, dep))
xdep, ok := db.dep[x]
if !ok {
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index e52091c3af..82f3f316c6 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -397,7 +397,7 @@ func TestQueryContextWait(t *testing.T) {
prepares0 := numPrepares(t, db)
// TODO(kardianos): convert this from using a timeout to using an explicit
- // cancel when the query signals that is is "executing" the query.
+ // cancel when the query signals that it is "executing" the query.
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
defer cancel()
@@ -597,7 +597,7 @@ func TestPoolExhaustOnCancel(t *testing.T) {
state := 0
// waiter will be called for all queries, including
- // initial setup queries. The state is only assigned when no
+ // initial setup queries. The state is only assigned when
// no queries are made.
//
// Only allow the first batch of queries to finish once the
diff --git a/src/debug/pe/file_test.go b/src/debug/pe/file_test.go
index 24cd673254..4f0510200f 100644
--- a/src/debug/pe/file_test.go
+++ b/src/debug/pe/file_test.go
@@ -298,6 +298,17 @@ const (
linkCgoExternal
)
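+// getImageBase returns the preferred load address from the PE optional
+// header, which is a 32-bit or 64-bit structure depending on the binary.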
+func getImageBase(f *File) uintptr {
+ switch oh := f.OptionalHeader.(type) {
+ case *OptionalHeader32:
+ return uintptr(oh.ImageBase)
+ case *OptionalHeader64:
+ return uintptr(oh.ImageBase)
+ default:
+ panic("unexpected optionalheader type")
+ }
+}
+
func testDWARF(t *testing.T, linktype int) {
if runtime.GOOS != "windows" {
t.Skip("skipping windows only test")
@@ -347,14 +358,15 @@ func testDWARF(t *testing.T, linktype int) {
if err != nil {
t.Fatalf("running test executable failed: %s %s", err, out)
}
+ t.Logf("Testprog output:\n%s", string(out))
- matches := regexp.MustCompile("main=(.*)\n").FindStringSubmatch(string(out))
+ matches := regexp.MustCompile("offset=(.*)\n").FindStringSubmatch(string(out))
if len(matches) < 2 {
t.Fatalf("unexpected program output: %s", out)
}
- wantaddr, err := strconv.ParseUint(matches[1], 0, 64)
+ wantoffset, err := strconv.ParseUint(matches[1], 0, 64)
if err != nil {
- t.Fatalf("unexpected main address %q: %s", matches[1], err)
+ t.Fatalf("unexpected main offset %q: %s", matches[1], err)
}
f, err := Open(exe)
@@ -363,6 +375,8 @@ func testDWARF(t *testing.T, linktype int) {
}
defer f.Close()
+ imageBase := getImageBase(f)
+
var foundDebugGDBScriptsSection bool
for _, sect := range f.Sections {
if sect.Name == ".debug_gdb_scripts" {
@@ -389,10 +403,20 @@ func testDWARF(t *testing.T, linktype int) {
break
}
if e.Tag == dwarf.TagSubprogram {
- if name, ok := e.Val(dwarf.AttrName).(string); ok && name == "main.main" {
- if addr, ok := e.Val(dwarf.AttrLowpc).(uint64); ok && addr == wantaddr {
- return
+ name, ok := e.Val(dwarf.AttrName).(string)
+ if ok && name == "main.main" {
+ t.Logf("Found main.main")
+ addr, ok := e.Val(dwarf.AttrLowpc).(uint64)
+ if !ok {
+ t.Fatal("Failed to get AttrLowpc")
}
+ offset := uintptr(addr) - imageBase
+ if offset != uintptr(wantoffset) {
+ t.Fatal("Runtime offset (0x%x) did "+
+ "not match dwarf offset "+
+ "(0x%x)", wantoffset, offset)
+ }
+ return
}
}
}
@@ -479,11 +503,52 @@ const testprog = `
package main
import "fmt"
+import "syscall"
+import "unsafe"
{{if .}}import "C"
{{end}}
+// struct MODULEINFO from the Windows SDK
+type moduleinfo struct {
+ BaseOfDll uintptr
+ SizeOfImage uint32
+ EntryPoint uintptr
+}
+
+func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
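+// funcPC returns the entry PC of f: the interface's data word points at a
+// func value, whose first word is the function's entry address.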
+func funcPC(f interface{}) uintptr {
+ var a uintptr
+ return **(**uintptr)(add(unsafe.Pointer(&f), unsafe.Sizeof(a)))
+}
+
func main() {
+ kernel32 := syscall.MustLoadDLL("kernel32.dll")
+ psapi := syscall.MustLoadDLL("psapi.dll")
+ getModuleHandle := kernel32.MustFindProc("GetModuleHandleW")
+ getCurrentProcess := kernel32.MustFindProc("GetCurrentProcess")
+ getModuleInformation := psapi.MustFindProc("GetModuleInformation")
+
+ procHandle, _, _ := getCurrentProcess.Call()
+ moduleHandle, _, err := getModuleHandle.Call(0)
+ if moduleHandle == 0 {
+ panic(fmt.Sprintf("GetModuleHandle() failed: %d", err))
+ }
+
+ var info moduleinfo
+ ret, _, err := getModuleInformation.Call(procHandle, moduleHandle,
+ uintptr(unsafe.Pointer(&info)), unsafe.Sizeof(info))
+
+ if ret == 0 {
+ panic(fmt.Sprintf("GetModuleInformation() failed: %d", err))
+ }
+
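+ // Print main's offset from the module base; the test compares it against
+ // the DWARF low PC minus ImageBase, so ASLR relocation doesn't matter.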
+ offset := funcPC(main) - info.BaseOfDll
+ fmt.Printf("base=0x%x\n", info.BaseOfDll)
fmt.Printf("main=%p\n", main)
+ fmt.Printf("offset=0x%x\n", offset)
}
`
@@ -535,7 +600,11 @@ func TestBuildingWindowsGUI(t *testing.T) {
func TestImportTableInUnknownSection(t *testing.T) {
if runtime.GOOS != "windows" {
- t.Skip("skipping windows only test")
+ t.Skip("skipping Windows-only test")
+ }
+ if runtime.GOARCH == "arm" {
+ // Issue 27904
+ t.Skip("skipping test on arm; no atmfd.dll available")
}
// first we need to find this font driver
diff --git a/src/debug/plan9obj/file.go b/src/debug/plan9obj/file.go
index c78e35d000..314608da61 100644
--- a/src/debug/plan9obj/file.go
+++ b/src/debug/plan9obj/file.go
@@ -274,7 +274,7 @@ func newTable(symtab []byte, ptrsz int) ([]Sym, error) {
ts.Value = s.value
switch s.typ {
default:
- ts.Name = string(s.name[:])
+ ts.Name = string(s.name)
case 'z', 'Z':
for i := 0; i < len(s.name); i += 2 {
eltIdx := binary.BigEndian.Uint16(s.name[i : i+2])
diff --git a/src/encoding/base64/base64.go b/src/encoding/base64/base64.go
index e8afc48859..0bb37b311a 100644
--- a/src/encoding/base64/base64.go
+++ b/src/encoding/base64/base64.go
@@ -270,7 +270,7 @@ func (e CorruptInputError) Error() string {
return "illegal base64 data at input byte " + strconv.FormatInt(int64(e), 10)
}
-// decodeQuantum decodes up to 4 base64 bytes. It takes for parameters
+// decodeQuantum decodes up to 4 base64 bytes. The received parameters are
// the destination buffer dst, the source buffer src and an index in the
// source buffer si.
// It returns the number of bytes read from src, the number of bytes written
diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go
index 5ef0388862..f4f740ef42 100644
--- a/src/encoding/gob/decoder.go
+++ b/src/encoding/gob/decoder.go
@@ -12,10 +12,10 @@ import (
"sync"
)
-// tooBig provides a sanity check for sizes; used in several places.
-// Upper limit of 1GB, allowing room to grow a little without overflow.
-// TODO: make this adjustable?
-const tooBig = 1 << 30
+// tooBig provides a sanity check for sizes; used in several places. The upper
+// limit is 1GB on 32-bit systems and 8GB on 64-bit, allowing room to grow a
+// little without overflow.
+const tooBig = (1 << 30) << (^uint(0) >> 62)
// A Decoder manages the receipt of type and data information read from the
// remote side of a connection.
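For reference, a minimal sketch (outside the patch) of how the new
platform-dependent limit works out: ^uint(0)>>62 is 3 where uint is 64
bits wide and 0 where it is 32 bits wide.

	package main

	import "fmt"

	const tooBig = (1 << 30) << (^uint(0) >> 62)

	func main() {
		// 64-bit: (1<<30)<<3 == 1<<33 (8GB)
		// 32-bit: (1<<30)<<0 == 1<<30 (1GB)
		fmt.Println(tooBig)
	}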
diff --git a/src/encoding/gob/encoder_test.go b/src/encoding/gob/encoder_test.go
index dc9bbcf35d..825f0d6f03 100644
--- a/src/encoding/gob/encoder_test.go
+++ b/src/encoding/gob/encoder_test.go
@@ -10,7 +10,6 @@ import (
"fmt"
"io/ioutil"
"reflect"
- "runtime"
"strings"
"testing"
)
@@ -1128,24 +1127,3 @@ func TestBadData(t *testing.T) {
}
}
}
-
-// TestHugeWriteFails tests that enormous messages trigger an error.
-func TestHugeWriteFails(t *testing.T) {
- if runtime.GOARCH == "wasm" {
- t.Skip("out of memory on wasm")
- }
- if testing.Short() {
- // Requires allocating a monster, so don't do this from all.bash.
- t.Skip("skipping huge allocation in short mode")
- }
- huge := make([]byte, tooBig)
- huge[0] = 7 // Make sure it's not all zeros.
- buf := new(bytes.Buffer)
- err := NewEncoder(buf).Encode(huge)
- if err == nil {
- t.Fatalf("expected error for huge slice")
- }
- if !strings.Contains(err.Error(), "message too big") {
- t.Fatalf("expected 'too big' error; got %s\n", err.Error())
- }
-}
diff --git a/src/encoding/json/decode.go b/src/encoding/json/decode.go
index cab4616ba3..731553dca6 100644
--- a/src/encoding/json/decode.go
+++ b/src/encoding/json/decode.go
@@ -473,7 +473,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
- if v.Type().NumMethod() > 0 {
+ if v.Type().NumMethod() > 0 && v.CanInterface() {
if u, ok := v.Interface().(Unmarshaler); ok {
return u, nil, reflect.Value{}
}
@@ -786,7 +786,7 @@ func (d *decodeState) object(v reflect.Value) error {
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || reflect.Zero(kt).OverflowInt(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
- return nil
+ break
}
kv = reflect.ValueOf(n).Convert(kt)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
@@ -794,14 +794,16 @@ func (d *decodeState) object(v reflect.Value) error {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil || reflect.Zero(kt).OverflowUint(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
- return nil
+ break
}
kv = reflect.ValueOf(n).Convert(kt)
default:
panic("json: Unexpected key type") // should never occur
}
}
- v.SetMapIndex(kv, subv)
+ if kv.IsValid() {
+ v.SetMapIndex(kv, subv)
+ }
}
// Next token must be , or }.
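A minimal sketch of the intended effect of these changes to the map-key
path: an invalid key is recorded as an UnmarshalTypeError, but decoding
of the remaining entries continues.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		var m map[int]string
		err := json.Unmarshal([]byte(`{"1":"a","x":"b","3":"c"}`), &m)
		// err reports the invalid key "x"; entries 1 and 3 are
		// still decoded.
		fmt.Println(m, err)
	}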
diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go
index 5fbe67a706..54432600a5 100644
--- a/src/encoding/json/decode_test.go
+++ b/src/encoding/json/decode_test.go
@@ -266,6 +266,10 @@ type XYZ struct {
Z interface{}
}
+type unexportedWithMethods struct{}
+
+func (unexportedWithMethods) F() {}
+
func sliceAddr(x []int) *[]int { return &x }
func mapAddr(x map[string]int) *map[string]int { return &x }
@@ -554,6 +558,16 @@ var unmarshalTests = []unmarshalTest{
ptr: new(map[uint8]string),
err: &UnmarshalTypeError{Value: "number -1", Type: reflect.TypeOf(uint8(0)), Offset: 2},
},
+ {
+ in: `{"F":{"a":2,"3":4}}`,
+ ptr: new(map[string]map[int]int),
+ err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(int(0)), Offset: 7},
+ },
+ {
+ in: `{"F":{"a":2,"3":4}}`,
+ ptr: new(map[string]map[uint]int),
+ err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(uint(0)), Offset: 7},
+ },
// Map keys can be encoding.TextUnmarshalers.
{in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY},
@@ -2141,6 +2155,9 @@ func TestInvalidStringOption(t *testing.T) {
//
// (Issue 24152) If the embedded struct is given an explicit name,
// ensure that the normal unmarshal logic does not panic in reflect.
+//
+// (Issue 28145) If the embedded struct is given an explicit name and has
+// exported methods, don't cause a panic trying to get its value.
func TestUnmarshalEmbeddedUnexported(t *testing.T) {
type (
embed1 struct{ Q int }
@@ -2180,6 +2197,9 @@ func TestUnmarshalEmbeddedUnexported(t *testing.T) {
embed2 `json:"embed2"`
Q int
}
+ S9 struct {
+ unexportedWithMethods `json:"embed"`
+ }
)
tests := []struct {
@@ -2241,6 +2261,11 @@ func TestUnmarshalEmbeddedUnexported(t *testing.T) {
in: `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`,
ptr: new(S8),
out: &S8{embed1{1}, embed2{2}, 3},
+ }, {
+ // Issue 28145, similar to the cases above.
+ in: `{"embed": {}}`,
+ ptr: new(S9),
+ out: &S9{},
}}
for i, tt := range tests {
diff --git a/src/encoding/json/scanner.go b/src/encoding/json/scanner.go
index 9e6d482e16..88572245fc 100644
--- a/src/encoding/json/scanner.go
+++ b/src/encoding/json/scanner.go
@@ -289,7 +289,7 @@ func stateEndValue(s *scanner, c byte) int {
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
- if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ if !isSpace(c) {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
diff --git a/src/expvar/expvar.go b/src/expvar/expvar.go
index b7928aab17..976b300d63 100644
--- a/src/expvar/expvar.go
+++ b/src/expvar/expvar.go
@@ -137,7 +137,7 @@ func (v *Map) Init() *Map {
return v
}
-// updateKeys updates the sorted list of keys in v.keys.
+// addKey updates the sorted list of keys in v.keys.
func (v *Map) addKey(key string) {
v.keysMu.Lock()
defer v.keysMu.Unlock()
@@ -199,6 +199,17 @@ func (v *Map) AddFloat(key string, delta float64) {
}
}
+// Delete deletes the given key from the map.
+func (v *Map) Delete(key string) {
+ v.keysMu.Lock()
+ defer v.keysMu.Unlock()
+ i := sort.SearchStrings(v.keys, key)
+ if i < len(v.keys) && key == v.keys[i] {
+ v.keys = append(v.keys[:i], v.keys[i+1:]...)
+ v.m.Delete(key)
+ }
+}
+
// Do calls f for each entry in the map.
// The map is locked during the iteration,
// but existing entries may be concurrently updated.
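A minimal usage sketch of the new Delete method:

	package main

	import (
		"expvar"
		"fmt"
	)

	func main() {
		m := expvar.NewMap("colors")
		m.Add("red", 1)
		m.Add("blue", 4)
		m.Delete("red") // removes the key and its value
		m.Do(func(kv expvar.KeyValue) { fmt.Println(kv.Key, kv.Value) })
	}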
diff --git a/src/expvar/expvar_test.go b/src/expvar/expvar_test.go
index 728e763896..804b56c1aa 100644
--- a/src/expvar/expvar_test.go
+++ b/src/expvar/expvar_test.go
@@ -183,6 +183,43 @@ func TestMapInit(t *testing.T) {
}
}
+func TestMapDelete(t *testing.T) {
+ RemoveAll()
+ colors := NewMap("bike-shed-colors")
+
+ colors.Add("red", 1)
+ colors.Add("red", 2)
+ colors.Add("blue", 4)
+
+ n := 0
+ colors.Do(func(KeyValue) { n++ })
+ if n != 2 {
+ t.Errorf("after two Add calls with distinct keys, Do should invoke f 2 times; got %v", n)
+ }
+
+ colors.Delete("red")
+ n = 0
+ colors.Do(func(KeyValue) { n++ })
+ if n != 1 {
+ t.Errorf("removed red, Do should invoke f 1 times; got %v", n)
+ }
+
+ colors.Delete("notfound")
+ n = 0
+ colors.Do(func(KeyValue) { n++ })
+ if n != 1 {
+ t.Errorf("attempted to remove notfound, Do should invoke f 1 times; got %v", n)
+ }
+
+ colors.Delete("blue")
+ colors.Delete("blue")
+ n = 0
+ colors.Do(func(KeyValue) { n++ })
+ if n != 0 {
+ t.Errorf("all keys removed, Do should invoke f 0 times; got %v", n)
+ }
+}
+
func TestMapCounter(t *testing.T) {
RemoveAll()
colors := NewMap("bike-shed-colors")
diff --git a/src/flag/flag.go b/src/flag/flag.go
index ae84e1f775..2eef9d6ab9 100644
--- a/src/flag/flag.go
+++ b/src/flag/flag.go
@@ -83,6 +83,28 @@ import (
// but no such flag is defined.
var ErrHelp = errors.New("flag: help requested")
+// errParse is returned by Set if a flag's value fails to parse, such as with an invalid integer for Int.
+// It then gets wrapped through failf to provide more information.
+var errParse = errors.New("parse error")
+
+// errRange is returned by Set if a flag's value is out of range.
+// It then gets wrapped through failf to provide more information.
+var errRange = errors.New("value out of range")
+
+func numError(err error) error {
+ ne, ok := err.(*strconv.NumError)
+ if !ok {
+ return err
+ }
+ if ne.Err == strconv.ErrSyntax {
+ return errParse
+ }
+ if ne.Err == strconv.ErrRange {
+ return errRange
+ }
+ return err
+}
+
// -- bool Value
type boolValue bool
@@ -93,6 +115,9 @@ func newBoolValue(val bool, p *bool) *boolValue {
func (b *boolValue) Set(s string) error {
v, err := strconv.ParseBool(s)
+ if err != nil {
+ err = errParse
+ }
*b = boolValue(v)
return err
}
@@ -120,6 +145,9 @@ func newIntValue(val int, p *int) *intValue {
func (i *intValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, strconv.IntSize)
+ if err != nil {
+ err = numError(err)
+ }
*i = intValue(v)
return err
}
@@ -138,6 +166,9 @@ func newInt64Value(val int64, p *int64) *int64Value {
func (i *int64Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
+ if err != nil {
+ err = numError(err)
+ }
*i = int64Value(v)
return err
}
@@ -156,6 +187,9 @@ func newUintValue(val uint, p *uint) *uintValue {
func (i *uintValue) Set(s string) error {
v, err := strconv.ParseUint(s, 0, strconv.IntSize)
+ if err != nil {
+ err = numError(err)
+ }
*i = uintValue(v)
return err
}
@@ -174,6 +208,9 @@ func newUint64Value(val uint64, p *uint64) *uint64Value {
func (i *uint64Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
+ if err != nil {
+ err = numError(err)
+ }
*i = uint64Value(v)
return err
}
@@ -209,6 +246,9 @@ func newFloat64Value(val float64, p *float64) *float64Value {
func (f *float64Value) Set(s string) error {
v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ err = numError(err)
+ }
*f = float64Value(v)
return err
}
@@ -227,6 +267,9 @@ func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
func (d *durationValue) Set(s string) error {
v, err := time.ParseDuration(s)
+ if err != nil {
+ err = errParse
+ }
*d = durationValue(v)
return err
}
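A minimal sketch of the resulting error text (flag name and value are
made up; the wrapping through failf adds the "invalid value" prefix):

	package main

	import (
		"flag"
		"fmt"
		"io/ioutil"
	)

	func main() {
		fs := flag.NewFlagSet("demo", flag.ContinueOnError)
		fs.SetOutput(ioutil.Discard)
		fs.Int("n", 0, "")
		err := fs.Parse([]string{"-n=abc"})
		fmt.Println(err) // e.g. invalid value "abc" for flag -n: parse error
	}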
diff --git a/src/flag/flag_test.go b/src/flag/flag_test.go
index c7f0c07d44..0d9491c020 100644
--- a/src/flag/flag_test.go
+++ b/src/flag/flag_test.go
@@ -9,6 +9,7 @@ import (
. "flag"
"fmt"
"io"
+ "io/ioutil"
"os"
"sort"
"strconv"
@@ -491,3 +492,55 @@ func TestGetters(t *testing.T) {
t.Errorf("unexpected output: got %v, expected %v", fs.Output(), expectedOutput)
}
}
+
+func TestParseError(t *testing.T) {
+ for _, typ := range []string{"bool", "int", "int64", "uint", "uint64", "float64", "duration"} {
+ fs := NewFlagSet("parse error test", ContinueOnError)
+ fs.SetOutput(ioutil.Discard)
+ _ = fs.Bool("bool", false, "")
+ _ = fs.Int("int", 0, "")
+ _ = fs.Int64("int64", 0, "")
+ _ = fs.Uint("uint", 0, "")
+ _ = fs.Uint64("uint64", 0, "")
+ _ = fs.Float64("float64", 0, "")
+ _ = fs.Duration("duration", 0, "")
+ // Strings cannot give errors.
+ args := []string{"-" + typ + "=x"}
+ err := fs.Parse(args) // x is not a valid setting for any flag.
+ if err == nil {
+ t.Errorf("Parse(%q)=%v; expected parse error", args, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), "invalid") || !strings.Contains(err.Error(), "parse error") {
+ t.Errorf("Parse(%q)=%v; expected parse error", args, err)
+ }
+ }
+}
+
+func TestRangeError(t *testing.T) {
+ bad := []string{
+ "-int=123456789012345678901",
+ "-int64=123456789012345678901",
+ "-uint=123456789012345678901",
+ "-uint64=123456789012345678901",
+ "-float64=1e1000",
+ }
+ for _, arg := range bad {
+ fs := NewFlagSet("parse error test", ContinueOnError)
+ fs.SetOutput(ioutil.Discard)
+ _ = fs.Int("int", 0, "")
+ _ = fs.Int64("int64", 0, "")
+ _ = fs.Uint("uint", 0, "")
+ _ = fs.Uint64("uint64", 0, "")
+ _ = fs.Float64("float64", 0, "")
+ // Strings cannot give errors, and bools and durations do not return strconv.NumError.
+ err := fs.Parse([]string{arg})
+ if err == nil {
+ t.Errorf("Parse(%q)=%v; expected range error", arg, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), "invalid") || !strings.Contains(err.Error(), "value out of range") {
+ t.Errorf("Parse(%q)=%v; expected range error", arg, err)
+ }
+ }
+}
diff --git a/src/fmt/example_test.go b/src/fmt/example_test.go
index ecf3391ce7..c285175976 100644
--- a/src/fmt/example_test.go
+++ b/src/fmt/example_test.go
@@ -63,6 +63,19 @@ func ExampleFscanln() {
// 3: ken, 271828, 3.141590
}
+func ExampleSscanf() {
+ var name string
+ var age int
+ n, err := fmt.Sscanf("Kim is 22 years old", "%s is %d years old", &name, &age)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("%d: %s, %d\n", n, name, age)
+
+ // Output:
+ // 2: Kim, 22
+}
+
func ExamplePrint() {
const name, age = "Kim", 22
fmt.Print(name, " is ", age, " years old.\n")
diff --git a/src/fmt/fmt_test.go b/src/fmt/fmt_test.go
index d63271a805..e97372225c 100644
--- a/src/fmt/fmt_test.go
+++ b/src/fmt/fmt_test.go
@@ -686,11 +686,10 @@ var fmtTests = []struct {
{"%#v", 1.2345678, "1.2345678"},
{"%#v", float32(1.2345678), "1.2345678"},
- // Whole number floats should have a single trailing zero added, but not
- // for exponent notation.
- {"%#v", 1.0, "1.0"},
+ // Whole number floats are printed without decimals. See Issue 27634.
+ {"%#v", 1.0, "1"},
{"%#v", 1000000.0, "1e+06"},
- {"%#v", float32(1.0), "1.0"},
+ {"%#v", float32(1.0), "1"},
{"%#v", float32(1000000.0), "1e+06"},
// Only print []byte and []uint8 as type []byte if they appear at the top level.
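A minimal sketch of the new %#v output for whole-number floats:

	package main

	import "fmt"

	func main() {
		fmt.Printf("%#v %#v\n", 1.0, 1000000.0) // prints: 1 1e+06
	}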
diff --git a/src/fmt/format.go b/src/fmt/format.go
index 3a3cd8d1a1..91103f2c07 100644
--- a/src/fmt/format.go
+++ b/src/fmt/format.go
@@ -481,19 +481,15 @@ func (f *fmt) fmtFloat(v float64, size int, verb rune, prec int) {
return
}
// The sharp flag forces printing a decimal point for non-binary formats
- // and retains trailing zeros, which we may need to restore. For the sharpV
- // flag, we ensure a single trailing zero is present if the output is not
- // in exponent notation.
- if f.sharpV || (f.sharp && verb != 'b') {
+ // and retains trailing zeros, which we may need to restore.
+ if f.sharp && verb != 'b' {
digits := 0
- if !f.sharpV {
- switch verb {
- case 'g', 'G':
- digits = prec
- // If no precision is set explicitly use a precision of 6.
- if digits == -1 {
- digits = 6
- }
+ switch verb {
+ case 'v', 'g', 'G':
+ digits = prec
+ // If no precision is set explicitly use a precision of 6.
+ if digits == -1 {
+ digits = 6
}
}
@@ -502,32 +498,25 @@ func (f *fmt) fmtFloat(v float64, size int, verb rune, prec int) {
var tailBuf [5]byte
tail := tailBuf[:0]
- var hasDecimalPoint, hasExponent bool
+ hasDecimalPoint := false
// Starting from i = 1 to skip sign at num[0].
for i := 1; i < len(num); i++ {
switch num[i] {
case '.':
hasDecimalPoint = true
case 'e', 'E':
- hasExponent = true
tail = append(tail, num[i:]...)
num = num[:i]
default:
digits--
}
}
- if f.sharpV {
- if !hasDecimalPoint && !hasExponent {
- num = append(num, '.', '0')
- }
- } else {
- if !hasDecimalPoint {
- num = append(num, '.')
- }
- for digits > 0 {
- num = append(num, '0')
- digits--
- }
+ if !hasDecimalPoint {
+ num = append(num, '.')
+ }
+ for digits > 0 {
+ num = append(num, '0')
+ digits--
}
num = append(num, tail...)
}
diff --git a/src/fmt/print.go b/src/fmt/print.go
index 32743d0712..22dc52ccdc 100644
--- a/src/fmt/print.go
+++ b/src/fmt/print.go
@@ -6,6 +6,7 @@ package fmt
import (
"errors"
+ "internal/fmtsort"
"io"
"os"
"reflect"
@@ -753,8 +754,8 @@ func (p *pp) printValue(value reflect.Value, verb rune, depth int) {
} else {
p.buf.WriteString(mapString)
}
- iter := f.MapRange()
- for i := 0; iter.Next(); i++ {
+ sorted := fmtsort.Sort(f)
+ for i, key := range sorted.Key {
if i > 0 {
if p.fmt.sharpV {
p.buf.WriteString(commaSpaceString)
@@ -762,9 +763,9 @@ func (p *pp) printValue(value reflect.Value, verb rune, depth int) {
p.buf.WriteByte(' ')
}
}
- p.printValue(iter.Key(), verb, depth+1)
+ p.printValue(key, verb, depth+1)
p.buf.WriteByte(':')
- p.printValue(iter.Value(), verb, depth+1)
+ p.printValue(sorted.Value[i], verb, depth+1)
}
if p.fmt.sharpV {
p.buf.WriteByte('}')
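A minimal sketch of the user-visible effect: formatted map output is
now deterministic, with keys in sorted order.

	package main

	import "fmt"

	func main() {
		m := map[string]int{"b": 2, "c": 3, "a": 1}
		fmt.Println(m) // always: map[a:1 b:2 c:3]
	}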
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index c15706fb6b..c3ed0b4983 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -36,9 +36,10 @@ var pkgDeps = map[string][]string{
// L0 is the lowest level, core, nearly unavoidable packages.
"errors": {},
"io": {"errors", "sync", "sync/atomic"},
- "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "internal/cpu", "internal/bytealg"},
+ "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "runtime/internal/math", "internal/cpu", "internal/bytealg"},
"runtime/internal/sys": {},
"runtime/internal/atomic": {"unsafe", "internal/cpu"},
+ "runtime/internal/math": {"runtime/internal/sys"},
"internal/race": {"runtime", "unsafe"},
"sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
@@ -114,6 +115,7 @@ var pkgDeps = map[string][]string{
"image": {"L2", "image/color"}, // interfaces
"image/color": {"L2"}, // interfaces
"image/color/palette": {"L2", "image/color"},
+ "internal/fmtsort": {"reflect", "sort"},
"reflect": {"L2"},
"sort": {"reflect"},
@@ -141,6 +143,7 @@ var pkgDeps = map[string][]string{
"image",
"image/color",
"image/color/palette",
+ "internal/fmtsort",
"reflect",
},
@@ -183,8 +186,8 @@ var pkgDeps = map[string][]string{
"time",
},
- // Formatted I/O: few dependencies (L1) but we must add reflect.
- "fmt": {"L1", "os", "reflect"},
+ // Formatted I/O: few dependencies (L1) but we must add reflect and internal/fmtsort.
+ "fmt": {"L1", "os", "reflect", "internal/fmtsort"},
"log": {"L1", "os", "fmt", "time"},
// Packages used by testing must be low-level (L2+fmt).
@@ -276,6 +279,8 @@ var pkgDeps = map[string][]string{
"internal/goroot": {"L4", "OS"},
"internal/singleflight": {"sync"},
"internal/trace": {"L4", "OS"},
+ "internal/traceparser": {"L4", "internal/traceparser/filebuf"},
+ "internal/traceparser/filebuf": {"L4", "OS"},
"math/big": {"L4"},
"mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
"mime/quotedprintable": {"L4"},
@@ -321,7 +326,7 @@ var pkgDeps = map[string][]string{
"net": {
"L0", "CGO",
"context", "math/rand", "os", "reflect", "sort", "syscall", "time",
- "internal/nettrace", "internal/poll",
+ "internal/nettrace", "internal/poll", "internal/syscall/unix",
"internal/syscall/windows", "internal/singleflight", "internal/race",
"golang_org/x/net/dns/dnsmessage", "golang_org/x/net/lif", "golang_org/x/net/route",
},
diff --git a/src/go/constant/value.go b/src/go/constant/value.go
index 64f8eb660a..0982243edb 100644
--- a/src/go/constant/value.go
+++ b/src/go/constant/value.go
@@ -170,7 +170,7 @@ func (x int64Val) String() string { return strconv.FormatInt(int64(x), 10) }
func (x intVal) String() string { return x.val.String() }
func (x ratVal) String() string { return rtof(x).String() }
-// String returns returns a decimal approximation of the Float value.
+// String returns a decimal approximation of the Float value.
func (x floatVal) String() string {
f := x.val
diff --git a/src/go/doc/doc.go b/src/go/doc/doc.go
index 3c3e28d48f..d0d4d3265b 100644
--- a/src/go/doc/doc.go
+++ b/src/go/doc/doc.go
@@ -79,13 +79,18 @@ type Note struct {
type Mode int
const (
- // extract documentation for all package-level declarations,
- // not just exported ones
+ // AllDecls says to extract documentation for all package-level
+ // declarations, not just exported ones.
AllDecls Mode = 1 << iota
- // show all embedded methods, not just the ones of
- // invisible (unexported) anonymous fields
+ // AllMethods says to show all embedded methods, not just the ones of
+ // invisible (unexported) anonymous fields.
AllMethods
+
+ // PreserveAST says to leave the AST unmodified. Originally, pieces of
+ // the AST such as function bodies were nil-ed out to save memory in
+ // godoc, but not all programs want that behavior.
+ PreserveAST
)
// New computes the package documentation for the given package AST.
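A minimal sketch of the new PreserveAST mode (the source string is
illustrative):

	package main

	import (
		"fmt"
		"go/ast"
		"go/doc"
		"go/parser"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		src := "package p\n\n// F does nothing.\nfunc F() {}\n"
		f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		pkg := &ast.Package{Name: "p", Files: map[string]*ast.File{"p.go": f}}
		doc.New(pkg, "example.com/p", doc.PreserveAST)
		// Without PreserveAST, doc.New would have set the body to nil.
		fmt.Println(f.Decls[0].(*ast.FuncDecl).Body != nil) // true
	}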
diff --git a/src/go/doc/example.go b/src/go/doc/example.go
index 5b40bb0fb2..d6d4ece3a8 100644
--- a/src/go/doc/example.go
+++ b/src/go/doc/example.go
@@ -68,6 +68,9 @@ func Examples(files ...*ast.File) []*Example {
if !isTest(name, "Example") {
continue
}
+ if f.Body == nil { // ast.File.Body nil dereference (see issue 28044)
+ continue
+ }
var doc string
if f.Doc != nil {
doc = f.Doc.Text()
diff --git a/src/go/doc/reader.go b/src/go/doc/reader.go
index 21d5907a03..26365e46b5 100644
--- a/src/go/doc/reader.go
+++ b/src/go/doc/reader.go
@@ -36,9 +36,10 @@ func recvString(recv ast.Expr) string {
// set creates the corresponding Func for f and adds it to mset.
// If there are multiple f's with the same name, set keeps the first
-// one with documentation; conflicts are ignored.
+// one with documentation; conflicts are ignored. The boolean
+// specifies whether to leave the AST untouched.
//
-func (mset methodSet) set(f *ast.FuncDecl) {
+func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) {
name := f.Name.Name
if g := mset[name]; g != nil && g.Doc != "" {
// A function with the same name has already been registered;
@@ -65,7 +66,9 @@ func (mset methodSet) set(f *ast.FuncDecl) {
Recv: recv,
Orig: recv,
}
- f.Doc = nil // doc consumed - remove from AST
+ if !preserveAST {
+ f.Doc = nil // doc consumed - remove from AST
+ }
}
// add adds method m to the method set; m is ignored if the method set
@@ -299,8 +302,9 @@ func (r *reader) readValue(decl *ast.GenDecl) {
Decl: decl,
order: r.order,
})
- decl.Doc = nil // doc consumed - remove from AST
-
+ if r.mode&PreserveAST == 0 {
+ decl.Doc = nil // doc consumed - remove from AST
+ }
// Note: It's important that the order used here is global because the cleanupTypes
// methods may move values associated with types back into the global list. If the
// order is list-specific, sorting is not deterministic because the same order value
@@ -339,12 +343,14 @@ func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
// compute documentation
doc := spec.Doc
- spec.Doc = nil // doc consumed - remove from AST
if doc == nil {
// no doc associated with the spec, use the declaration doc, if any
doc = decl.Doc
}
- decl.Doc = nil // doc consumed - remove from AST
+ if r.mode&PreserveAST == 0 {
+ spec.Doc = nil // doc consumed - remove from AST
+ decl.Doc = nil // doc consumed - remove from AST
+ }
typ.doc = doc.Text()
// record anonymous fields (they may contribute methods)
@@ -362,8 +368,10 @@ func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
// readFunc processes a func or method declaration.
//
func (r *reader) readFunc(fun *ast.FuncDecl) {
- // strip function body
- fun.Body = nil
+ // strip function body if requested.
+ if r.mode&PreserveAST == 0 {
+ fun.Body = nil
+ }
// associate methods with the receiver type, if any
if fun.Recv != nil {
@@ -380,7 +388,7 @@ func (r *reader) readFunc(fun *ast.FuncDecl) {
return
}
if typ := r.lookupType(recvTypeName); typ != nil {
- typ.methods.set(fun)
+ typ.methods.set(fun, r.mode&PreserveAST != 0)
}
// otherwise ignore the method
// TODO(gri): There may be exported methods of non-exported types
@@ -414,13 +422,13 @@ func (r *reader) readFunc(fun *ast.FuncDecl) {
}
// If there is exactly one result type, associate the function with that type.
if numResultTypes == 1 {
- typ.funcs.set(fun)
+ typ.funcs.set(fun, r.mode&PreserveAST != 0)
return
}
}
// just an ordinary function
- r.funcs.set(fun)
+ r.funcs.set(fun, r.mode&PreserveAST != 0)
}
var (
@@ -481,7 +489,9 @@ func (r *reader) readFile(src *ast.File) {
// add package documentation
if src.Doc != nil {
r.readDoc(src.Doc)
- src.Doc = nil // doc consumed - remove from AST
+ if r.mode&PreserveAST == 0 {
+ src.Doc = nil // doc consumed - remove from AST
+ }
}
// add all declarations
@@ -545,7 +555,9 @@ func (r *reader) readFile(src *ast.File) {
// collect MARKER(...): annotations
r.readNotes(src.Comments)
- src.Comments = nil // consumed unassociated comments - remove from AST
+ if r.mode&PreserveAST == 0 {
+ src.Comments = nil // consumed unassociated comments - remove from AST
+ }
}
func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
diff --git a/src/go/internal/gccgoimporter/gccgoinstallation.go b/src/go/internal/gccgoimporter/gccgoinstallation.go
index 622dfc8b69..8fc7ce3232 100644
--- a/src/go/internal/gccgoimporter/gccgoinstallation.go
+++ b/src/go/internal/gccgoimporter/gccgoinstallation.go
@@ -26,8 +26,10 @@ type GccgoInstallation struct {
}
// Ask the driver at the given path for information for this GccgoInstallation.
-func (inst *GccgoInstallation) InitFromDriver(gccgoPath string) (err error) {
- cmd := exec.Command(gccgoPath, "-###", "-S", "-x", "go", "-")
+// The given arguments are passed directly to the call of the driver.
+func (inst *GccgoInstallation) InitFromDriver(gccgoPath string, args ...string) (err error) {
+ argv := append([]string{"-###", "-S", "-x", "go", "-"}, args...)
+ cmd := exec.Command(gccgoPath, argv...)
stderr, err := cmd.StderrPipe()
if err != nil {
return
@@ -55,7 +57,8 @@ func (inst *GccgoInstallation) InitFromDriver(gccgoPath string) (err error) {
}
}
- stdout, err := exec.Command(gccgoPath, "-dumpversion").Output()
+ argv = append([]string{"-dumpversion"}, args...)
+ stdout, err := exec.Command(gccgoPath, argv...).Output()
if err != nil {
return
}
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index 189bfb4223..ba16b65224 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -300,7 +300,7 @@ func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
-// and line comments.
+// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
diff --git a/src/go/printer/testdata/parser.go b/src/go/printer/testdata/parser.go
index 44dfa19ff3..80b476cf2e 100644
--- a/src/go/printer/testdata/parser.go
+++ b/src/go/printer/testdata/parser.go
@@ -290,7 +290,7 @@ func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int)
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
-// and line comments.
+// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
index 23bbb2885f..e78abf12a2 100644
--- a/src/go/scanner/scanner.go
+++ b/src/go/scanner/scanner.go
@@ -85,6 +85,15 @@ func (s *Scanner) next() {
}
}
+// peek returns the byte following the most recently read character without
+// advancing the scanner. If the scanner is at EOF, peek returns 0.
+func (s *Scanner) peek() byte {
+ if s.rdOffset < len(s.src) {
+ return s.src[s.rdOffset]
+ }
+ return 0
+}
+
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
@@ -735,14 +744,13 @@ scanAgain:
if '0' <= s.ch && s.ch <= '9' {
insertSemi = true
tok, lit = s.scanNumber(true)
- } else if s.ch == '.' {
- s.next()
- if s.ch == '.' {
- s.next()
- tok = token.ELLIPSIS
- }
} else {
tok = token.PERIOD
+ if s.ch == '.' && s.peek() == '.' {
+ s.next()
+ s.next() // consume last '.'
+ tok = token.ELLIPSIS
+ }
}
case ',':
tok = token.COMMA
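A minimal sketch of the fixed behavior: a stand-alone ".." now scans as
two PERIOD tokens instead of an ILLEGAL token.

	package main

	import (
		"fmt"
		"go/scanner"
		"go/token"
	)

	func main() {
		src := []byte(".. ...")
		fset := token.NewFileSet()
		var s scanner.Scanner
		s.Init(fset.AddFile("", fset.Base(), len(src)), src, nil, 0)
		for {
			_, tok, _ := s.Scan()
			if tok == token.EOF {
				break
			}
			fmt.Println(tok) // ".", ".", "..."
		}
	}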
diff --git a/src/go/scanner/scanner_test.go b/src/go/scanner/scanner_test.go
index 0aad368099..36c962209c 100644
--- a/src/go/scanner/scanner_test.go
+++ b/src/go/scanner/scanner_test.go
@@ -757,6 +757,7 @@ var errors = []struct {
{"\a", token.ILLEGAL, 0, "", "illegal character U+0007"},
{`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"},
{`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"},
+ {"..", token.PERIOD, 0, "", ""}, // two periods, not invalid token (issue #28112)
{`' '`, token.CHAR, 0, `' '`, ""},
{`''`, token.CHAR, 0, `''`, "illegal rune literal"},
{`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"},
@@ -822,7 +823,7 @@ func TestScanErrors(t *testing.T) {
// Verify that no comments show up as literal values when skipping comments.
func TestIssue10213(t *testing.T) {
- var src = `
+ const src = `
var (
A = 1 // foo
)
@@ -855,6 +856,23 @@ func TestIssue10213(t *testing.T) {
}
}
+func TestIssue28112(t *testing.T) {
+ const src = "... .. 0.. .." // make sure to have stand-alone ".." immediately before EOF to test EOF behavior
+ tokens := []token.Token{token.ELLIPSIS, token.PERIOD, token.PERIOD, token.FLOAT, token.PERIOD, token.PERIOD, token.PERIOD, token.EOF}
+ var s Scanner
+ s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
+ for _, want := range tokens {
+ pos, got, lit := s.Scan()
+ if got != want {
+ t.Errorf("%s: got %s, want %s", fset.Position(pos), got, want)
+ }
+ // literal tokens are expected to have a (non-empty) literal string; other tokens don't matter for this test
+ if tokenclass(got) == literal && lit == "" {
+ t.Errorf("%s: for %s got empty literal string", fset.Position(pos), got)
+ }
+ }
+}
+
func BenchmarkScan(b *testing.B) {
b.StopTimer()
fset := token.NewFileSet()
diff --git a/src/go/types/api.go b/src/go/types/api.go
index b1fcb2d10b..1252aade35 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -353,20 +353,20 @@ func (conf *Config) Check(path string, fset *token.FileSet, files []*ast.File, i
// AssertableTo reports whether a value of type V can be asserted to have type T.
func AssertableTo(V *Interface, T Type) bool {
- m, _ := assertableTo(V, T)
+ m, _ := (*Checker)(nil).assertableTo(V, T)
return m == nil
}
// AssignableTo reports whether a value of type V is assignable to a variable of type T.
func AssignableTo(V, T Type) bool {
x := operand{mode: value, typ: V}
- return x.assignableTo(nil, T, nil) // config not needed for non-constant x
+ return x.assignableTo(nil, T, nil) // check not needed for non-constant x
}
// ConvertibleTo reports whether a value of type V is convertible to a value of type T.
func ConvertibleTo(V, T Type) bool {
x := operand{mode: value, typ: V}
- return x.convertibleTo(nil, T) // config not needed for non-constant x
+ return x.convertibleTo(nil, T) // check not needed for non-constant x
}
// Implements reports whether type V implements interface T.
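A minimal sketch of the exported entry points, which now pass a nil
*Checker internally (safe because all methods are type-checked by the
time these are called):

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		str := types.Typ[types.String]
		empty := types.NewInterfaceType(nil, nil) // interface{}
		empty.Complete()
		fmt.Println(types.AssignableTo(str, empty))  // true
		fmt.Println(types.ConvertibleTo(str, empty)) // true
		fmt.Println(types.AssertableTo(empty, str))  // true
	}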
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index c34ecbf9d1..fe3950a52d 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -257,6 +257,16 @@ func TestTypesInfo(t *testing.T) {
`(string, bool)`,
},
+ // issue 28277
+ {`package issue28277_a; func f(...int)`,
+ `...int`,
+ `[]int`,
+ },
+ {`package issue28277_b; func f(a, b int, c ...[]struct{})`,
+ `...[]struct{}`,
+ `[][]struct{}`,
+ },
+
// tests for broken code that doesn't parse or type-check
{`package x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`},
{`package x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`},
@@ -389,6 +399,8 @@ func TestPredicatesInfo(t *testing.T) {
{`package t0; type _ int`, `int`, `type`},
{`package t1; type _ []int`, `[]int`, `type`},
{`package t2; type _ func()`, `func()`, `type`},
+ {`package t3; type _ func(int)`, `int`, `type`},
+ {`package t3; type _ func(...int)`, `...int`, `type`},
// built-ins
{`package b0; var _ = len("")`, `len`, `builtin`},
diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go
index 27002f6699..efa0cbba50 100644
--- a/src/go/types/assignments.go
+++ b/src/go/types/assignments.go
@@ -57,7 +57,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
return
}
- if reason := ""; !x.assignableTo(check.conf, T, &reason) {
+ if reason := ""; !x.assignableTo(check, T, &reason) {
if reason != "" {
check.errorf(x.pos(), "cannot use %s as %s value in %s: %s", x, T, context, reason)
} else {
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index d3f0c4d40d..882c773db4 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -95,7 +95,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
// spec: "As a special case, append also accepts a first argument assignable
// to type []byte with a second argument of string type followed by ... .
// This form appends the bytes of the string.
- if nargs == 2 && call.Ellipsis.IsValid() && x.assignableTo(check.conf, NewSlice(universeByte), nil) {
+ if nargs == 2 && call.Ellipsis.IsValid() && x.assignableTo(check, NewSlice(universeByte), nil) {
arg(x, 1)
if x.mode == invalid {
return
@@ -345,7 +345,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
return
}
- if !x.assignableTo(check.conf, m.key, nil) {
+ if !x.assignableTo(check, m.key, nil) {
check.invalidArg(x.pos(), "%s is not assignable to %s", x, m.key)
return
}
diff --git a/src/go/types/call.go b/src/go/types/call.go
index d5c196afe8..52f1ac31ce 100644
--- a/src/go/types/call.go
+++ b/src/go/types/call.go
@@ -383,6 +383,11 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
goto Error
}
+ // methods may not have a fully set up signature yet
+ if m, _ := obj.(*Func); m != nil {
+ check.objDecl(m, nil)
+ }
+
if x.mode == typexpr {
// method expression
m, _ := obj.(*Func)
diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go
index 2bdfc150f4..45e1fcb605 100644
--- a/src/go/types/check_test.go
+++ b/src/go/types/check_test.go
@@ -92,6 +92,9 @@ var tests = [][]string{
{"testdata/blank.src"},
{"testdata/issue25008b.src", "testdata/issue25008a.src"}, // order (b before a) is crucial!
{"testdata/issue26390.src"}, // stand-alone test to ensure case is triggered
+ {"testdata/issue23203a.src"},
+ {"testdata/issue23203b.src"},
+ {"testdata/issue28251.src"},
}
var fset = token.NewFileSet()
diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go
index 81a65838fe..fecb7b617f 100644
--- a/src/go/types/conversions.go
+++ b/src/go/types/conversions.go
@@ -18,7 +18,7 @@ func (check *Checker) conversion(x *operand, T Type) {
case constArg && isConstType(T):
// constant conversion
switch t := T.Underlying().(*Basic); {
- case representableConst(x.val, check.conf, t, &x.val):
+ case representableConst(x.val, check, t, &x.val):
ok = true
case isInteger(x.typ) && isString(t):
codepoint := int64(-1)
@@ -31,7 +31,7 @@ func (check *Checker) conversion(x *operand, T Type) {
x.val = constant.MakeString(string(codepoint))
ok = true
}
- case x.convertibleTo(check.conf, T):
+ case x.convertibleTo(check, T):
// non-constant conversion
x.mode = value
ok = true
@@ -76,9 +76,12 @@ func (check *Checker) conversion(x *operand, T Type) {
// is tricky because we'd have to run updateExprType on the argument first.
// (Issue #21982.)
-func (x *operand) convertibleTo(conf *Config, T Type) bool {
+// convertibleTo reports whether T(x) is valid.
+// The check parameter may be nil if convertibleTo is invoked through an
+// exported API call, i.e., when all methods have been type-checked.
+func (x *operand) convertibleTo(check *Checker, T Type) bool {
// "x is assignable to T"
- if x.assignableTo(conf, T, nil) {
+ if x.assignableTo(check, T, nil) {
return true
}
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index 0ff1fb058b..1e2790a171 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -150,31 +150,6 @@ func (check *Checker) objDecl(obj Object, def *Named) {
}
case *TypeName:
- // fixFor26390 enables a temporary work-around to handle alias type names
- // that have not been given a type yet even though the underlying type
- // is already known. See testdata/issue26390.src for a simple example.
- // Set this flag to false to disable this code quickly (and comment
- // out the new test in decls4.src that will fail again).
- // TODO(gri) remove this for Go 1.12 in favor of a more comprehensive fix
- const fixFor26390 = true
- if fixFor26390 {
- // If we have a package-level alias type name that has not been
- // given a type yet but the underlying type is a type name that
- // has been given a type already, don't report a cycle but use
- // the underlying type name's type instead. The cycle shouldn't
- // exist in the first place in this case and is due to the way
- // methods are type-checked at the moment. See also the comment
- // at the end of Checker.typeDecl below.
- if d := check.objMap[obj]; d != nil && d.alias && obj.typ == Typ[Invalid] {
- // If we can find the underlying type name syntactically
- // and it has a type, use that type.
- if tname := check.resolveBaseTypeName(ast.NewIdent(obj.name)); tname != nil && tname.typ != nil {
- obj.typ = tname.typ
- break
- }
- }
- }
-
if check.typeCycle(obj) {
// break cycle
// (without this, calling underlying()
@@ -245,14 +220,6 @@ func (check *Checker) objDecl(obj Object, def *Named) {
// Indirections are used to break type cycles.
var indir = NewTypeName(token.NoPos, nil, "*", nil)
-// cutCycle is a sentinel type name that is pushed onto the object path
-// to indicate that a cycle doesn't actually exist. This is currently
-// needed to break cycles formed via method declarations because they
-// are type-checked together with their receiver base types. Once methods
-// are type-checked separately (see also TODO in Checker.typeDecl), we
-// can get rid of this.
-var cutCycle = NewTypeName(token.NoPos, nil, "!", nil)
-
// typeCycle checks if the cycle starting with obj is valid and
// reports an error if it is not.
// TODO(gri) rename s/typeCycle/cycle/ once we don't need the other
@@ -293,16 +260,10 @@ func (check *Checker) typeCycle(obj Object) (isCycle bool) {
case *Const, *Var:
nval++
case *TypeName:
- switch {
- case obj == indir:
+ if obj == indir {
ncycle-- // don't count (indirections are not objects)
hasIndir = true
- case obj == cutCycle:
- // The cycle is not real and only caused by the fact
- // that we type-check methods when we type-check their
- // receiver base types.
- return false
- default:
+ } else {
// Determine if the type name is an alias or not. For
// package-level objects, use the object map which
// provides syntactic information (which doesn't rely
@@ -515,11 +476,6 @@ func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, alias bo
}
- // check and add associated methods
- // TODO(gri) It's easy to create pathological cases where the
- // current approach is incorrect: In general we need to know
- // and add all methods _before_ type-checking the type.
- // See https://play.golang.org/p/WMpE0q2wK8
check.addMethodDecls(obj)
}
@@ -559,15 +515,7 @@ func (check *Checker) addMethodDecls(obj *TypeName) {
}
}
- // Suppress detection of type cycles occurring through method
- // declarations - they wouldn't exist if methods were type-
- // checked separately from their receiver base types. See also
- // comment at the end of Checker.typeDecl.
- // TODO(gri) Remove this once methods are type-checked separately.
- check.push(cutCycle)
- defer check.pop()
-
- // type-check methods
+ // add valid methods
for _, m := range methods {
// spec: "For a base type, the non-blank names of methods bound
// to it must be unique."
@@ -585,9 +533,6 @@ func (check *Checker) addMethodDecls(obj *TypeName) {
continue
}
- // type-check
- check.objDecl(m, nil)
-
if base != nil {
base.methods = append(base.methods, m)
}
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index fc4de98eb7..35e9b36f31 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -187,11 +187,20 @@ func roundFloat64(x constant.Value) constant.Value {
// representable floating-point and complex values, and to an Int
// value for integer values; it is left alone otherwise.
// It is ok to provide the address of the first argument for rounded.
-func representableConst(x constant.Value, conf *Config, typ *Basic, rounded *constant.Value) bool {
+//
+// The check parameter may be nil if representableConst is invoked
+// (indirectly) through an exported API call (AssignableTo, ConvertibleTo)
+// because we don't need the Checker's config for those calls.
+func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *constant.Value) bool {
if x.Kind() == constant.Unknown {
return true // avoid follow-up errors
}
+ var conf *Config
+ if check != nil {
+ conf = check.conf
+ }
+
switch {
case isInteger(typ):
x := constant.ToInt(x)
@@ -323,7 +332,7 @@ func representableConst(x constant.Value, conf *Config, typ *Basic, rounded *con
// representable checks that a constant operand is representable in the given basic type.
func (check *Checker) representable(x *operand, typ *Basic) {
assert(x.mode == constant_)
- if !representableConst(x.val, check.conf, typ, &x.val) {
+ if !representableConst(x.val, check, typ, &x.val) {
var msg string
if isNumeric(x.typ) && isNumeric(typ) {
// numeric conversion : error msg
@@ -576,15 +585,15 @@ func (check *Checker) comparison(x, y *operand, op token.Token) {
// spec: "In any comparison, the first operand must be assignable
// to the type of the second operand, or vice versa."
err := ""
- if x.assignableTo(check.conf, y.typ, nil) || y.assignableTo(check.conf, x.typ, nil) {
+ if x.assignableTo(check, y.typ, nil) || y.assignableTo(check, x.typ, nil) {
defined := false
switch op {
case token.EQL, token.NEQ:
// spec: "The equality operators == and != apply to operands that are comparable."
- defined = Comparable(x.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ)
+ defined = Comparable(x.typ) && Comparable(y.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ)
case token.LSS, token.LEQ, token.GTR, token.GEQ:
// spec: The ordering operators <, <=, >, and >= apply to operands that are ordered."
- defined = isOrdered(x.typ)
+ defined = isOrdered(x.typ) && isOrdered(y.typ)
default:
unreachable()
}
@@ -1547,7 +1556,7 @@ func keyVal(x constant.Value) interface{} {
// typeAssertion checks that x.(T) is legal; xtyp must be the type of x.
func (check *Checker) typeAssertion(pos token.Pos, x *operand, xtyp *Interface, T Type) {
- method, wrongType := assertableTo(xtyp, T)
+ method, wrongType := check.assertableTo(xtyp, T)
if method == nil {
return
}
diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go
index 8560bb9b7d..cf489b1c9a 100644
--- a/src/go/types/issues_test.go
+++ b/src/go/types/issues_test.go
@@ -355,3 +355,92 @@ func TestIssue25627(t *testing.T) {
})
}
}
+
+func TestIssue28005(t *testing.T) {
+ // method names must match defining interface name for this test
+ // (see last comment in this function)
+ sources := [...]string{
+ "package p; type A interface{ A() }",
+ "package p; type B interface{ B() }",
+ "package p; type X interface{ A; B }",
+ }
+
+ // compute original file ASTs
+ var orig [len(sources)]*ast.File
+ for i, src := range sources {
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ orig[i] = f
+ }
+
+ // run the test for all order permutations of the incoming files
+ for _, perm := range [][len(sources)]int{
+ {0, 1, 2},
+ {0, 2, 1},
+ {1, 0, 2},
+ {1, 2, 0},
+ {2, 0, 1},
+ {2, 1, 0},
+ } {
+ // create file order permutation
+ files := make([]*ast.File, len(sources))
+ for i := range perm {
+ files[i] = orig[perm[i]]
+ }
+
+ // type-check package with given file order permutation
+ var conf Config
+ info := &Info{Defs: make(map[*ast.Ident]Object)}
+ _, err := conf.Check("", fset, files, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for interface object X
+ var obj Object
+ for name, def := range info.Defs {
+ if name.Name == "X" {
+ obj = def
+ break
+ }
+ }
+ if obj == nil {
+ t.Fatal("interface not found")
+ }
+ iface := obj.Type().Underlying().(*Interface) // X must be an interface
+
+ // Each iface method m is embedded; and m's receiver base type name
+ // must match the method's name per the choice in the source file.
+ for i := 0; i < iface.NumMethods(); i++ {
+ m := iface.Method(i)
+ recvName := m.Type().(*Signature).Recv().Type().(*Named).Obj().Name()
+ if recvName != m.Name() {
+ t.Errorf("perm %v: got recv %s; want %s", perm, recvName, m.Name())
+ }
+ }
+ }
+}
+
+func TestIssue28282(t *testing.T) {
+ // create type interface { error }
+ et := Universe.Lookup("error").Type()
+ it := NewInterfaceType(nil, []Type{et})
+ it.Complete()
+ // verify that after completing the interface, the embedded method remains unchanged
+ want := et.Underlying().(*Interface).Method(0)
+ got := it.Method(0)
+ if got != want {
+ t.Fatalf("%s.Method(0): got %q (%p); want %q (%p)", it, got, got, want, want)
+ }
+ // verify that lookup finds the same method in both interfaces (redundant check)
+ obj, _, _ := LookupFieldOrMethod(et, false, nil, "Error")
+ if obj != want {
+ t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", et, obj, obj, want, want)
+ }
+ obj, _, _ = LookupFieldOrMethod(it, false, nil, "Error")
+ if obj != want {
+ t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", it, obj, obj, want, want)
+ }
+}
diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go
index f31ef9cfe9..e6764f45a0 100644
--- a/src/go/types/lookup.go
+++ b/src/go/types/lookup.go
@@ -6,6 +6,11 @@
package types
+// Internal use of LookupFieldOrMethod: If the obj result is a method
+// associated with a concrete (non-interface) type, the method's signature
+// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
+// the method's type.
+
// LookupFieldOrMethod looks up a field or method with given package and name
// in T and returns the corresponding *Var or *Func, an index sequence, and a
// bool indicating if there were any pointer indirections on the path to the
@@ -112,7 +117,7 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
// look for a matching attached method
if i, m := lookupMethod(named.methods, pkg, name); m != nil {
// potential match
- assert(m.typ != nil)
+ // caution: method may not have a proper signature yet
index = concat(e.index, i)
if obj != nil || e.multiples {
return nil, index, false // collision
@@ -248,6 +253,14 @@ func lookupType(m map[Type]int, typ Type) (int, bool) {
// x is of interface type V).
//
func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) {
+ return (*Checker)(nil).missingMethod(V, T, static)
+}
+
+// missingMethod is like MissingMethod but accepts a receiver.
+// The receiver may be nil if missingMethod is invoked through
+// an exported API call (such as MissingMethod), i.e., when all
+// methods have been type-checked.
+func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) {
// fast path for common case
if T.Empty() {
return
@@ -275,11 +288,17 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b
for _, m := range T.allMethods {
obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name)
+ // we must have a method (not a field of matching function type)
f, _ := obj.(*Func)
if f == nil {
return m, false
}
+ // methods may not have a fully set up signature yet
+ if check != nil {
+ check.objDecl(f, nil)
+ }
+
if !Identical(f.typ, m.typ) {
return m, true
}
@@ -291,14 +310,16 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b
// assertableTo reports whether a value of type V can be asserted to have type T.
// It returns (nil, false) as affirmative answer. Otherwise it returns a missing
// method required by V and whether it is missing or just has the wrong type.
-func assertableTo(V *Interface, T Type) (method *Func, wrongType bool) {
+// The receiver may be nil if assertableTo is invoked through an exported API call
+// (such as AssertableTo), i.e., when all methods have been type-checked.
+func (check *Checker) assertableTo(V *Interface, T Type) (method *Func, wrongType bool) {
// no static check is required if T is an interface
// spec: "If T is an interface type, x.(T) asserts that the
// dynamic type of x implements the interface T."
if _, ok := T.Underlying().(*Interface); ok && !strict {
return
}
- return MissingMethod(T, V, false)
+ return check.missingMethod(T, V, false)
}
// deref dereferences typ if it is a *Pointer and returns its base and true.
diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go
index 2b810da728..619c448492 100644
--- a/src/go/types/methodset.go
+++ b/src/go/types/methodset.go
@@ -255,8 +255,20 @@ func (s methodSet) add(list []*Func, index []int, indirect bool, multiples bool)
}
// ptrRecv reports whether the receiver is of the form *T.
-// The receiver must exist.
func ptrRecv(f *Func) bool {
- _, isPtr := deref(f.typ.(*Signature).recv.typ)
- return isPtr
+ // If a method's receiver type is set, use that as the source of truth for the receiver.
+ // Caution: Checker.funcDecl (decl.go) marks a function by setting its type to an empty
+ // signature. We may reach here before the signature is fully set up: we must explicitly
+ // check if the receiver is set (we cannot just look for non-nil f.typ).
+ if sig, _ := f.typ.(*Signature); sig != nil && sig.recv != nil {
+ _, isPtr := deref(sig.recv.typ)
+ return isPtr
+ }
+
+ // If a method's type is not set it may be a method/function that is:
+ // 1) client-supplied (via NewFunc with no signature), or
+ // 2) internally created but not yet type-checked.
+ // For case 1) we can't do anything; the client must know what they are doing.
+ // For case 2) we can use the information gathered by the resolver.
+ return f.hasPtrRecv
}
diff --git a/src/go/types/object.go b/src/go/types/object.go
index 07adfbc34c..cf773238a0 100644
--- a/src/go/types/object.go
+++ b/src/go/types/object.go
@@ -294,6 +294,7 @@ func (*Var) isDependency() {} // a variable may be a dependency of an initializa
// An abstract method may belong to many interfaces due to embedding.
type Func struct {
object
+ hasPtrRecv bool // only valid for methods that don't have a type yet
}
// NewFunc returns a new function with the given signature, representing
@@ -304,7 +305,7 @@ func NewFunc(pos token.Pos, pkg *Package, name string, sig *Signature) *Func {
if sig != nil {
typ = sig
}
- return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), token.NoPos}}
+ return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), token.NoPos}, false}
}
// FullName returns the package- or receiver-type-qualified name of
diff --git a/src/go/types/operand.go b/src/go/types/operand.go
index 07247bd6f5..97ca6c622f 100644
--- a/src/go/types/operand.go
+++ b/src/go/types/operand.go
@@ -201,7 +201,9 @@ func (x *operand) isNil() bool {
// assignableTo reports whether x is assignable to a variable of type T.
// If the result is false and a non-nil reason is provided, it may be set
// to a more detailed explanation of the failure (result != "").
-func (x *operand) assignableTo(conf *Config, T Type, reason *string) bool {
+// The check parameter may be nil if assignableTo is invoked through
+// an exported API call, i.e., when all methods have been type-checked.
+func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool {
if x.mode == invalid || T == Typ[Invalid] {
return true // avoid spurious errors
}
@@ -226,7 +228,7 @@ func (x *operand) assignableTo(conf *Config, T Type, reason *string) bool {
return true
}
if x.mode == constant_ {
- return representableConst(x.val, conf, t, nil)
+ return representableConst(x.val, check, t, nil)
}
// The result of a comparison is an untyped boolean,
// but may not be a constant.
@@ -249,7 +251,7 @@ func (x *operand) assignableTo(conf *Config, T Type, reason *string) bool {
// T is an interface type and x implements T
if Ti, ok := Tu.(*Interface); ok {
- if m, wrongType := MissingMethod(x.typ, Ti, true); m != nil /* Implements(x.typ, Ti) */ {
+ if m, wrongType := check.missingMethod(x.typ, Ti, true); m != nil /* Implements(x.typ, Ti) */ {
if reason != nil {
if wrongType {
*reason = "wrong type for method " + m.Name()
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index ec7e4ed1c5..f6c3b601b2 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -460,63 +460,74 @@ func (check *Checker) collectObjects() {
for _, f := range methods {
fdecl := check.objMap[f].fdecl
if list := fdecl.Recv.List; len(list) > 0 {
- // f is a method
- // receiver may be of the form T or *T, possibly with parentheses
- typ := unparen(list[0].Type)
- if ptr, _ := typ.(*ast.StarExpr); ptr != nil {
- typ = unparen(ptr.X)
- }
- if base, _ := typ.(*ast.Ident); base != nil {
- // base is a potential base type name; determine
- // "underlying" defined type and associate f with it
- if tname := check.resolveBaseTypeName(base); tname != nil {
- check.methods[tname] = append(check.methods[tname], f)
- }
+ // f is a method.
+ // Determine the receiver base type and associate f with it.
+ ptr, base := check.resolveBaseTypeName(list[0].Type)
+ if base != nil {
+ f.hasPtrRecv = ptr
+ check.methods[base] = append(check.methods[base], f)
}
}
}
}
-// resolveBaseTypeName returns the non-alias receiver base type name,
-// explicitly declared in the package scope, for the given receiver
-// type name; or nil.
-func (check *Checker) resolveBaseTypeName(name *ast.Ident) *TypeName {
+// resolveBaseTypeName returns the non-alias base type name for typ, and whether
+// there was a pointer indirection to get to it. The base type name must be declared
+// in package scope, and there can be at most one pointer indirection. If no such type
+// name exists, the returned base is nil.
+func (check *Checker) resolveBaseTypeName(typ ast.Expr) (ptr bool, base *TypeName) {
+ // Algorithm: Starting from a type expression, which may be a name,
+ // we follow that type through alias declarations until we reach a
+ // non-alias type name. If we encounter anything but pointer types or
+ // parentheses we're done. If we encounter more than one pointer type
+ // we're done.
var path []*TypeName
for {
+ typ = unparen(typ)
+
+ // check if we have a pointer type
+ if pexpr, _ := typ.(*ast.StarExpr); pexpr != nil {
+ // if we've already seen a pointer, we're done
+ if ptr {
+ return false, nil
+ }
+ ptr = true
+ typ = unparen(pexpr.X) // continue with pointer base type
+ }
+
+ // typ must be the name
+ name, _ := typ.(*ast.Ident)
+ if name == nil {
+ return false, nil
+ }
+
// name must denote an object found in the current package scope
// (note that dot-imported objects are not in the package scope!)
obj := check.pkg.scope.Lookup(name.Name)
if obj == nil {
- return nil
+ return false, nil
}
+
// the object must be a type name...
tname, _ := obj.(*TypeName)
if tname == nil {
- return nil
+ return false, nil
}
// ... which we have not seen before
if check.cycle(tname, path, false) {
- return nil
+ return false, nil
}
// we're done if tdecl defined tname as a new type
// (rather than an alias)
tdecl := check.objMap[tname] // must exist for objects in package scope
if !tdecl.alias {
- return tname
+ return ptr, tname
}
- // Otherwise, if tdecl defined an alias for a (possibly parenthesized)
- // type which is not an (unqualified) named type, we're done because
- // receiver base types must be named types declared in this package.
- typ := unparen(tdecl.typ) // a type may be parenthesized
- name, _ = typ.(*ast.Ident)
- if name == nil {
- return nil
- }
-
- // continue resolving name
+ // otherwise, continue resolving
+ typ = tdecl.typ
path = append(path, tname)
}
}
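A minimal sketch of the receiver forms the rewritten
resolveBaseTypeName accepts (identifiers are illustrative): any number
of alias steps, parentheses, and at most one pointer indirection.

	package p

	type T struct{}
	type A = T
	type P = *A

	func (T) m1()  {} // base type T
	func (*A) m2() {} // alias step plus one indirection: base type T
	func (P) m3()  {} // alias denoting *A: base type T, pointer receiver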
diff --git a/src/go/types/scope.go b/src/go/types/scope.go
index 839a60db2e..6cf5cc66f9 100644
--- a/src/go/types/scope.go
+++ b/src/go/types/scope.go
@@ -15,9 +15,6 @@ import (
"strings"
)
-// TODO(gri) Provide scopes with a name or other mechanism so that
-// objects can use that information for better printing.
-
// A Scope maintains a set of objects and links to its containing
// (parent) and contained (children) scopes. Objects may be inserted
// and looked up by name. The zero value for Scope is a ready-to-use
diff --git a/src/go/types/testdata/cycles2.src b/src/go/types/testdata/cycles2.src
index a7f4bc60f5..fd0df4bf27 100644
--- a/src/go/types/testdata/cycles2.src
+++ b/src/go/types/testdata/cycles2.src
@@ -88,22 +88,10 @@ type T3 /* ERROR cycle */ interface {
var x3 T3
type T4 /* ERROR cycle */ interface {
- m() [unsafe.Sizeof(cast4(x4.m))]int
+ m() [unsafe.Sizeof(cast4(x4.m))]int // cast is invalid but we have a cycle, so all bets are off
}
var x4 T4
var _ = cast4(x4.m)
type cast4 func()
-
-// This test is symmetric to the T4 case: Here the cast is
-// "correct", but it doesn't work inside the T5 interface.
-
-type T5 /* ERROR cycle */ interface {
- m() [unsafe.Sizeof(cast5(x5.m))]int
-}
-
-var x5 T5
-var _ = cast5(x5.m)
-
-type cast5 func() [0]int
diff --git a/src/go/types/testdata/decls0.src b/src/go/types/testdata/decls0.src
index e75216172b..56adbbfaae 100644
--- a/src/go/types/testdata/decls0.src
+++ b/src/go/types/testdata/decls0.src
@@ -189,10 +189,10 @@ func f2(x *f2 /* ERROR "not a type" */ ) {}
func f3() (x f3 /* ERROR "not a type" */ ) { return }
func f4() (x *f4 /* ERROR "not a type" */ ) { return }
-func (S0) m1(x S0.m1 /* ERROR "field or method" */ ) {}
-func (S0) m2(x *S0.m2 /* ERROR "field or method" */ ) {}
-func (S0) m3() (x S0.m3 /* ERROR "field or method" */ ) { return }
-func (S0) m4() (x *S0.m4 /* ERROR "field or method" */ ) { return }
+func (S0) m1 /* ERROR illegal cycle */ (x S0 /* ERROR value .* is not a type */ .m1) {}
+func (S0) m2 /* ERROR illegal cycle */ (x *S0 /* ERROR value .* is not a type */ .m2) {}
+func (S0) m3 /* ERROR illegal cycle */ () (x S0 /* ERROR value .* is not a type */ .m3) { return }
+func (S0) m4 /* ERROR illegal cycle */ () (x *S0 /* ERROR value .* is not a type */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
diff --git a/src/go/types/testdata/expr2.src b/src/go/types/testdata/expr2.src
index 31dc5f021c..0c959e8011 100644
--- a/src/go/types/testdata/expr2.src
+++ b/src/go/types/testdata/expr2.src
@@ -208,6 +208,19 @@ func interfaces() {
_ = i /* ERROR mismatched types */ == s2
_ = i /* ERROR mismatched types */ == &s2
+
+ // issue #28164
+ // testcase from issue
+ _ = interface /* ERROR cannot compare */ {}(nil) == []int(nil)
+
+ // related cases
+ var e interface{}
+ var s []int
+ var x int
+ _ = e /* ERROR cannot compare */ == s
+ _ = s /* ERROR cannot compare */ == e
+ _ = e /* ERROR cannot compare */ < x
+ _ = x /* ERROR cannot compare */ < e
}
func slices() {
diff --git a/src/go/types/testdata/expr3.src b/src/go/types/testdata/expr3.src
index b4c8163324..d562f0b16b 100644
--- a/src/go/types/testdata/expr3.src
+++ b/src/go/types/testdata/expr3.src
@@ -497,7 +497,7 @@ func _calls() {
f1(x ... /* ERROR "cannot use ..." */ )
f1(g0 /* ERROR "used as value" */ ())
f1(g1())
- // f1(g2()) // TODO(gri) missing position in error message
+ f1(g2 /* ERROR "cannot use g2" */ /* ERROR "too many arguments" */ ())
f2() /* ERROR "too few arguments" */
f2(3.14) /* ERROR "too few arguments" */
diff --git a/src/go/types/testdata/issue23203a.src b/src/go/types/testdata/issue23203a.src
new file mode 100644
index 0000000000..48cb5889cd
--- /dev/null
+++ b/src/go/types/testdata/issue23203a.src
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+type T struct{}
+
+func (T) m1() {}
+func (T) m2([unsafe.Sizeof(T.m1)]int) {}
+
+func main() {}
diff --git a/src/go/types/testdata/issue23203b.src b/src/go/types/testdata/issue23203b.src
new file mode 100644
index 0000000000..638ec6c5ce
--- /dev/null
+++ b/src/go/types/testdata/issue23203b.src
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+type T struct{}
+
+func (T) m2([unsafe.Sizeof(T.m1)]int) {}
+func (T) m1() {}
+
+func main() {}
diff --git a/src/go/types/testdata/issue28251.src b/src/go/types/testdata/issue28251.src
new file mode 100644
index 0000000000..a456f5c27e
--- /dev/null
+++ b/src/go/types/testdata/issue28251.src
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for various forms of
+// method receiver declarations, per the spec clarification
+// https://golang.org/cl/142757.
+
+package issue28251
+
+// test case from issue28251
+type T struct{}
+
+type T0 = *T
+
+func (T0) m() {}
+
+func _() { (&T{}).m() }
+
+// various alternative forms
+type (
+ T1 = (((T)))
+)
+
+func ((*(T1))) m1() {}
+func _() { (T{}).m1 /* ERROR m1 is not in method set of T */ () }
+func _() { (&T{}).m1() }
+
+type (
+ T2 = (((T3)))
+ T3 = T
+)
+
+func (T2) m2() {}
+func _() { (T{}).m2() }
+func _() { (&T{}).m2() }
+
+type (
+ T4 = ((*(T5)))
+ T5 = T
+)
+
+func (T4) m4() {}
+func _() { (T{}).m4 /* ERROR m4 is not in method set of T */ () }
+func _() { (&T{}).m4() }
+
+type (
+ T6 = (((T7)))
+ T7 = (*(T8))
+ T8 = T
+)
+
+func (T6) m6() {}
+func _() { (T{}).m6 /* ERROR m6 is not in method set of T */ () }
+func _() { (&T{}).m6() }
+
+type (
+ T9 = *T10
+ T10 = *T11
+ T11 = T
+)
+
+func (T9 /* ERROR invalid receiver \*\*T */ ) m9() {}
+func _() { (T{}).m9 /* ERROR has no field or method m9 */ () }
+func _() { (&T{}).m9 /* ERROR has no field or method m9 */ () }
diff --git a/src/go/types/testdata/issues.src b/src/go/types/testdata/issues.src
index 13f8309c82..8260f58519 100644
--- a/src/go/types/testdata/issues.src
+++ b/src/go/types/testdata/issues.src
@@ -302,3 +302,14 @@ var issue27346 = [][n /* ERROR undeclared */ ]int{
}
var issue22467 = map[int][... /* ERROR invalid use of ... */ ]int{0: {}}
+
+// Test that invalid use of ... in parameter lists is recognized
+// (issue #28281).
+func issue28281a(int, int, ...int)
+func issue28281b(a, b int, c ...int)
+func issue28281c(a, b, c ... /* ERROR can only use ... with final parameter */ int)
+func issue28281d(... /* ERROR can only use ... with final parameter */ int, int)
+func issue28281e(a, b, c ... /* ERROR can only use ... with final parameter */ int, d int)
+func issue28281f(... /* ERROR can only use ... with final parameter */ int, ... /* ERROR can only use ... with final parameter */ int, int)
+func (... /* ERROR expected type */ TT) f()
+func issue28281g() (... /* ERROR expected type */ TT)
\ No newline at end of file
diff --git a/src/go/types/type.go b/src/go/types/type.go
index d9399a6587..77426ba618 100644
--- a/src/go/types/type.go
+++ b/src/go/types/type.go
@@ -352,19 +352,14 @@ func (t *Interface) Complete() *Interface {
return t
}
+ // collect all methods
var allMethods []*Func
allMethods = append(allMethods, t.methods...)
for _, et := range t.embeddeds {
it := et.Underlying().(*Interface)
it.Complete()
- for _, tm := range it.allMethods {
- // Make a copy of the method and adjust its receiver type.
- newm := *tm
- newmtyp := *tm.typ.(*Signature)
- newm.typ = &newmtyp
- newmtyp.recv = NewVar(newm.pos, newm.pkg, "", t)
- allMethods = append(allMethods, &newm)
- }
+ // copy embedded methods unchanged (see issue #28282)
+ allMethods = append(allMethods, it.allMethods...)
}
sort.Sort(byUniqueMethodName(allMethods))
@@ -424,7 +419,7 @@ func (c *Chan) Elem() Type { return c.elem }
type Named struct {
obj *TypeName // corresponding declared object
underlying Type // possibly a *Named during setup; never a *Named once set up completely
- methods []*Func // methods declared for this type (not the method set of this type)
+ methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
}
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
diff --git a/src/go/types/typestring_test.go b/src/go/types/typestring_test.go
index 0efb7f0013..3cae4f134a 100644
--- a/src/go/types/typestring_test.go
+++ b/src/go/types/typestring_test.go
@@ -89,7 +89,7 @@ var independentTestTypes = []testEntry{
dup("func(...int) string"),
dup("func(x ...int) string"),
dup("func(x ...int) (u string)"),
- {"func(x, y ...int) (u string)", "func(x int, y ...int) (u string)"},
+ {"func(x int, y ...int) (u string)", "func(x int, y ...int) (u string)"},
// interfaces
dup("interface{}"),
diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go
index 12c5c7b0a5..b16bf962cd 100644
--- a/src/go/types/typexpr.go
+++ b/src/go/types/typexpr.go
@@ -391,7 +391,7 @@ func (check *Checker) arrayLength(e ast.Expr) int64 {
}
if isUntyped(x.typ) || isInteger(x.typ) {
if val := constant.ToInt(x.val); val.Kind() == constant.Int {
- if representableConst(val, check.conf, Typ[Int], nil) {
+ if representableConst(val, check, Typ[Int], nil) {
if n, ok := constant.Int64Val(val); ok && n >= 0 {
return n
}
@@ -414,10 +414,10 @@ func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicO
ftype := field.Type
if t, _ := ftype.(*ast.Ellipsis); t != nil {
ftype = t.Elt
- if variadicOk && i == len(list.List)-1 {
+ if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
variadic = true
} else {
- check.invalidAST(field.Pos(), "... not permitted")
+ check.softErrorf(t.Pos(), "can only use ... with final parameter in list")
// ignore ... and continue
}
}
@@ -451,9 +451,12 @@ func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicO
}
// For a variadic function, change the last parameter's type from T to []T.
- if variadic && len(params) > 0 {
+ // Since we type-checked T rather than ...T, we also need to retro-actively
+ // record the type for ...T.
+ if variadic {
last := params[len(params)-1]
last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
}
return
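A minimal sketch (hypothetical names) of what the relaxed variadic check now accepts and flags; the error text matches the diagnostic introduced above and the issue28281 testdata earlier in this change:

    package p

    func ok(a int, b ...int) {} // valid: ... applies to the single, final parameter

    // Both of the following now report the soft error
    // "can only use ... with final parameter in list":
    //
    //   func bad1(a, b, c ...int) {} // ... would apply to more than one name
    //   func bad2(... int, int)      // ... is not on the final parameter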
@@ -538,7 +541,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d
}
info := check.infoFromTypeLit(check.scope, iface, tname, path)
if info == nil || info == &emptyIfaceInfo {
- // error or empty interface - exit early
+ // we got an error or the empty interface - exit early
ityp.allMethods = markComplete
return
}
@@ -549,6 +552,15 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d
recvTyp = def
}
+ // Correct receiver type for all methods explicitly declared
+ // by this interface after we're done with type-checking at
+ // this level. See comment below for details.
+ check.later(func() {
+ for _, m := range ityp.methods {
+ m.typ.(*Signature).recv.typ = recvTyp
+ }
+ })
+
// collect methods
var sigfix []*methodInfo
for i, minfo := range info.methods {
@@ -558,9 +570,27 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d
pos := name.Pos()
// Don't type-check signature yet - use an
// empty signature now and update it later.
- // Since we know the receiver, set it up now
- // (required to avoid crash in ptrRecv; see
- // e.g. test case for issue 6638).
+ // But set up receiver since we know it and
+ // its position, and because interface method
+ // signatures don't get a receiver via regular
+ // type-checking (there isn't a receiver in the
+ // method's AST). Setting the receiver type is
+ // also important for ptrRecv() (see methodset.go).
+ //
+ // Note: For embedded methods, the receiver type
+ // should be the type of the interface that declared
+ // the methods in the first place. Since we get the
+ // methods here via methodInfo, which may be computed
+ // before we have all relevant interface types, we use
+ // the current interface's type (recvType). This may be
+ // the type of the interface embedding the interface that
+ // declared the methods. This doesn't matter for type-
+ // checking (we only care about the receiver type for
+ // the ptrRecv predicate, and it's never a pointer recv
+ // for interfaces), but it matters for go/types clients
+ // and for printing. We correct the receiver after type-
+ // checking.
+ //
// TODO(gri) Consider marking methods signatures
// as incomplete, for better error messages. See
// also the T4 and T5 tests in testdata/cycles2.src.
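A hedged illustration (hypothetical interfaces) of the receiver subtlety described in the comment above:

    type A interface{ m() }
    type B interface{ A } // B embeds A

    // After type checking, the copy of m in B's method set keeps A — the
    // interface that declared it — as its receiver type (see the
    // Interface.Complete change for issue #28282 above); the deferred
    // correction registered here only rewrites receivers of methods an
    // interface declares explicitly.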
diff --git a/src/image/jpeg/fdct.go b/src/image/jpeg/fdct.go
index 3f8be4e326..201a5abd0b 100644
--- a/src/image/jpeg/fdct.go
+++ b/src/image/jpeg/fdct.go
@@ -123,14 +123,14 @@ func fdct(b *block) {
tmp13 = tmp1 + tmp3
z1 = (tmp12 + tmp13) * fix_1_175875602
z1 += 1 << (constBits - pass1Bits - 1)
- tmp0 = tmp0 * fix_1_501321110
- tmp1 = tmp1 * fix_3_072711026
- tmp2 = tmp2 * fix_2_053119869
- tmp3 = tmp3 * fix_0_298631336
- tmp10 = tmp10 * -fix_0_899976223
- tmp11 = tmp11 * -fix_2_562915447
- tmp12 = tmp12 * -fix_0_390180644
- tmp13 = tmp13 * -fix_1_961570560
+ tmp0 *= fix_1_501321110
+ tmp1 *= fix_3_072711026
+ tmp2 *= fix_2_053119869
+ tmp3 *= fix_0_298631336
+ tmp10 *= -fix_0_899976223
+ tmp11 *= -fix_2_562915447
+ tmp12 *= -fix_0_390180644
+ tmp13 *= -fix_1_961570560
tmp12 += z1
tmp13 += z1
@@ -171,14 +171,14 @@ func fdct(b *block) {
tmp13 = tmp1 + tmp3
z1 = (tmp12 + tmp13) * fix_1_175875602
z1 += 1 << (constBits + pass1Bits - 1)
- tmp0 = tmp0 * fix_1_501321110
- tmp1 = tmp1 * fix_3_072711026
- tmp2 = tmp2 * fix_2_053119869
- tmp3 = tmp3 * fix_0_298631336
- tmp10 = tmp10 * -fix_0_899976223
- tmp11 = tmp11 * -fix_2_562915447
- tmp12 = tmp12 * -fix_0_390180644
- tmp13 = tmp13 * -fix_1_961570560
+ tmp0 *= fix_1_501321110
+ tmp1 *= fix_3_072711026
+ tmp2 *= fix_2_053119869
+ tmp3 *= fix_0_298631336
+ tmp10 *= -fix_0_899976223
+ tmp11 *= -fix_2_562915447
+ tmp12 *= -fix_0_390180644
+ tmp13 *= -fix_1_961570560
tmp12 += z1
tmp13 += z1
diff --git a/src/internal/bytealg/equal_ppc64x.s b/src/internal/bytealg/equal_ppc64x.s
index 9c9cf77588..34d2a2574b 100644
--- a/src/internal/bytealg/equal_ppc64x.s
+++ b/src/internal/bytealg/equal_ppc64x.s
@@ -7,17 +7,15 @@
#include "go_asm.h"
#include "textflag.h"
-TEXT ·Equal(SB),NOSPLIT,$0-49
+TEXT ·Equal(SB),NOSPLIT|NOFRAME,$0-49
MOVD a_len+8(FP), R4
MOVD b_len+32(FP), R5
CMP R5, R4 // unequal lengths are not equal
BNE noteq
MOVD a_base+0(FP), R3
MOVD b_base+24(FP), R4
- BL memeqbody<>(SB)
-
- MOVBZ R9,ret+48(FP)
- RET
+ MOVD $ret+48(FP), R10
+ BR memeqbody<>(SB)
noteq:
MOVBZ $0,ret+48(FP)
@@ -28,7 +26,7 @@ equal:
MOVBZ R3,ret+48(FP)
RET
-TEXT bytes·Equal(SB),NOSPLIT,$0-49
+TEXT bytes·Equal(SB),NOSPLIT|NOFRAME,$0-49
FUNCDATA $0, ·Equal·args_stackmap(SB)
MOVD a_len+8(FP), R4
MOVD b_len+32(FP), R5
@@ -36,10 +34,8 @@ TEXT bytes·Equal(SB),NOSPLIT,$0-49
BNE noteq
MOVD a_base+0(FP), R3
MOVD b_base+24(FP), R4
- BL memeqbody<>(SB)
-
- MOVBZ R9,ret+48(FP)
- RET
+ MOVD $ret+48(FP), R10
+ BR memeqbody<>(SB)
noteq:
MOVBZ $0,ret+48(FP)
@@ -51,25 +47,23 @@ equal:
RET
// memequal(a, b unsafe.Pointer, size uintptr) bool
-TEXT runtime·memequal(SB),NOSPLIT,$0-25
+TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
MOVD a+0(FP), R3
MOVD b+8(FP), R4
MOVD size+16(FP), R5
+ MOVD $ret+24(FP), R10
- BL memeqbody<>(SB)
- MOVB R9, ret+24(FP)
- RET
+ BR memeqbody<>(SB)
// memequal_varlen(a, b unsafe.Pointer) bool
-TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
+TEXT runtime·memequal_varlen(SB),NOSPLIT|NOFRAME,$0-17
MOVD a+0(FP), R3
MOVD b+8(FP), R4
CMP R3, R4
BEQ eq
MOVD 8(R11), R5 // compiler stores size at offset 8 in the closure
- BL memeqbody<>(SB)
- MOVB R9, ret+16(FP)
- RET
+ MOVD $ret+16(FP), R10
+ BR memeqbody<>(SB)
eq:
MOVD $1, R3
MOVB R3, ret+16(FP)
@@ -79,7 +73,7 @@ eq:
// R3 = s1
// R4 = s2
// R5 = len
-// R9 = return value
+// R10 = addr of return value (byte)
TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0
MOVD R5,CTR
CMP R5,$8 // only optimize >=8
@@ -92,26 +86,19 @@ TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0
setup32a: // 8 byte aligned, >= 32 bytes
SRADCC $5,R5,R6 // number of 32 byte chunks to compare
MOVD R6,CTR
+ MOVD $16,R14 // index for VSX loads and stores
loop32a:
- MOVD 0(R3),R6 // doublewords to compare
- MOVD 0(R4),R7
- MOVD 8(R3),R8 //
- MOVD 8(R4),R9
- CMP R6,R7 // bytes batch?
- BNE noteq
- MOVD 16(R3),R6
- MOVD 16(R4),R7
- CMP R8,R9 // bytes match?
- MOVD 24(R3),R8
- MOVD 24(R4),R9
- BNE noteq
- CMP R6,R7 // bytes match?
- BNE noteq
+ LXVD2X (R3+R0), VS32 // VS32 = V0
+ LXVD2X (R4+R0), VS33 // VS33 = V1
+ VCMPEQUBCC V0, V1, V2 // compare, setting CR6
+ BGE CR6, noteq
+ LXVD2X (R3+R14), VS32
+ LXVD2X (R4+R14), VS33
+ VCMPEQUBCC V0, V1, V2
+ BGE CR6, noteq
ADD $32,R3 // bump up to next 32
ADD $32,R4
- CMP R8,R9 // bytes match?
- BC 8,2,loop32a // br ctr and cr
- BNE noteq
+ BC 16, 0, loop32a // br ctr and cr
ANDCC $24,R5,R6 // Any 8 byte chunks?
BEQ leftover // and result is 0
setup8a:
@@ -145,9 +132,10 @@ simple:
BNE noteq
BR equal
noteq:
- MOVD $0, R9
+ MOVB $0, (R10)
RET
equal:
- MOVD $1, R9
+ MOVD $1, R3
+ MOVB R3, (R10)
RET
diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go
index bfb016c7f7..5e38ff7703 100644
--- a/src/internal/cpu/cpu.go
+++ b/src/internal/cpu/cpu.go
@@ -6,8 +6,7 @@
// used by the Go standard library.
package cpu
-// DebugOptions is set to true by the runtime if go was compiled with GOEXPERIMENT=debugcpu
-// and GOOS is Linux or Darwin.
+// DebugOptions is set to true by the runtime if the OS supports GODEBUGCPU.
// This should not be changed after it is initialized.
var DebugOptions bool
@@ -77,6 +76,7 @@ var ARM arm
// The struct is padded to avoid false sharing.
type arm struct {
_ CacheLinePad
+ HasVFPv4 bool
HasIDIVA bool
_ CacheLinePad
}
@@ -139,8 +139,7 @@ type s390x struct {
// Initialize examines the processor and sets the relevant variables above.
// This is called by the runtime package early in program initialization,
-// before normal init functions are run. env is set by runtime on Linux and Darwin
-// if go was compiled with GOEXPERIMENT=debugcpu.
+// before normal init functions are run. env is set by runtime if the OS supports GODEBUGCPU.
func Initialize(env string) {
doinit()
processOptions(env)
@@ -154,16 +153,19 @@ var options []option
// Option names should be lower case. e.g. avx instead of AVX.
type option struct {
- Name string
- Feature *bool
+ Name string
+ Feature *bool
+ Specified bool // whether feature value was specified in GODEBUGCPU
+ Enable bool // whether feature should be enabled
+ Required bool // whether feature is mandatory and can not be disabled
}
-// processOptions disables CPU feature values based on the parsed env string.
-// The env string is expected to be of the form feature1=0,feature2=0...
+// processOptions enables or disables CPU feature values based on the parsed env string.
+// The env string is expected to be of the form feature1=value1,feature2=value2...
// where each feature name comes from the architecture-specific list stored in the
-// cpu packages options variable. If env contains all=0 then all capabilities
-// referenced through the options variable are disabled. Other feature
-// names and values other than 0 are silently ignored.
+// cpu package's options variable, and each value is either 'on' or 'off'.
+// If env contains all=off then all CPU features referenced through the options
+// variable are disabled (features marked Required remain enabled). Unknown
+// feature names and unsupported values result in warning messages.
func processOptions(env string) {
field:
for env != "" {
@@ -176,26 +178,57 @@ field:
}
i = indexByte(field, '=')
if i < 0 {
+ print("GODEBUGCPU: no value specified for \"", field, "\"\n")
continue
}
key, value := field[:i], field[i+1:]
- // Only allow turning off CPU features by specifying '0'.
- if value == "0" {
- if key == "all" {
- for _, v := range options {
- *v.Feature = false
- }
- return
- } else {
- for _, v := range options {
- if v.Name == key {
- *v.Feature = false
- continue field
- }
- }
+ var enable bool
+ switch value {
+ case "on":
+ enable = true
+ case "off":
+ enable = false
+ default:
+ print("GODEBUGCPU: value \"", value, "\" not supported for option ", key, "\n")
+ continue field
+ }
+
+ if key == "all" {
+ for i := range options {
+ options[i].Specified = true
+ options[i].Enable = enable || options[i].Required
+ }
+ continue field
+ }
+
+ for i := range options {
+ if options[i].Name == key {
+ options[i].Specified = true
+ options[i].Enable = enable
+ continue field
}
}
+
+ print("GODEBUGCPU: unknown cpu feature \"", key, "\"\n")
+ }
+
+ for _, o := range options {
+ if !o.Specified {
+ continue
+ }
+
+ if o.Enable && !*o.Feature {
+ print("GODEBUGCPU: can not enable \"", o.Name, "\", missing hardware support\n")
+ continue
+ }
+
+ if !o.Enable && o.Required {
+ print("GODEBUGCPU: can not disable \"", o.Name, "\", required feature\n")
+ continue
+ }
+
+ *o.Feature = o.Enable
}
}
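In practice (illustrative invocation; valid feature names depend on GOARCH), the new parsing means a run like

    GODEBUGCPU=avx2=off,sse3=off ./prog

disables the named features when the hardware has them, while mistakes are now reported instead of silently ignored, e.g.:

    GODEBUGCPU: unknown cpu feature "avx512"
    GODEBUGCPU: value "0" not supported for option sse2
    GODEBUGCPU: can not disable "sse2", required feature

The last line shows the Required mechanism: features a port depends on (sse2 on amd64, fp/asimd on arm64, and the ppc64 set below) can be requested off but are never actually disabled.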
diff --git a/src/internal/cpu/cpu_arm.go b/src/internal/cpu/cpu_arm.go
index 6a5b30580c..772b67147c 100644
--- a/src/internal/cpu/cpu_arm.go
+++ b/src/internal/cpu/cpu_arm.go
@@ -15,15 +15,18 @@ var HWCap2 uint
// HWCAP/HWCAP2 bits. These are exposed by Linux and FreeBSD.
const (
+ hwcap_VFPv4 = 1 << 16
hwcap_IDIVA = 1 << 17
)
func doinit() {
options = []option{
- {"idiva", &ARM.HasIDIVA},
+ {Name: "vfpv4", Feature: &ARM.HasVFPv4},
+ {Name: "idiva", Feature: &ARM.HasIDIVA},
}
// HWCAP feature bits
+ ARM.HasVFPv4 = isSet(HWCap, hwcap_VFPv4)
ARM.HasIDIVA = isSet(HWCap, hwcap_IDIVA)
}
diff --git a/src/internal/cpu/cpu_arm64.go b/src/internal/cpu/cpu_arm64.go
index ad930af005..0b3ee8e069 100644
--- a/src/internal/cpu/cpu_arm64.go
+++ b/src/internal/cpu/cpu_arm64.go
@@ -42,32 +42,32 @@ const (
func doinit() {
options = []option{
- {"evtstrm", &ARM64.HasEVTSTRM},
- {"aes", &ARM64.HasAES},
- {"pmull", &ARM64.HasPMULL},
- {"sha1", &ARM64.HasSHA1},
- {"sha2", &ARM64.HasSHA2},
- {"crc32", &ARM64.HasCRC32},
- {"atomics", &ARM64.HasATOMICS},
- {"fphp", &ARM64.HasFPHP},
- {"asimdhp", &ARM64.HasASIMDHP},
- {"cpuid", &ARM64.HasCPUID},
- {"asimdrdm", &ARM64.HasASIMDRDM},
- {"jscvt", &ARM64.HasJSCVT},
- {"fcma", &ARM64.HasFCMA},
- {"lrcpc", &ARM64.HasLRCPC},
- {"dcpop", &ARM64.HasDCPOP},
- {"sha3", &ARM64.HasSHA3},
- {"sm3", &ARM64.HasSM3},
- {"sm4", &ARM64.HasSM4},
- {"asimddp", &ARM64.HasASIMDDP},
- {"sha512", &ARM64.HasSHA512},
- {"sve", &ARM64.HasSVE},
- {"asimdfhm", &ARM64.HasASIMDFHM},
+ {Name: "evtstrm", Feature: &ARM64.HasEVTSTRM},
+ {Name: "aes", Feature: &ARM64.HasAES},
+ {Name: "pmull", Feature: &ARM64.HasPMULL},
+ {Name: "sha1", Feature: &ARM64.HasSHA1},
+ {Name: "sha2", Feature: &ARM64.HasSHA2},
+ {Name: "crc32", Feature: &ARM64.HasCRC32},
+ {Name: "atomics", Feature: &ARM64.HasATOMICS},
+ {Name: "fphp", Feature: &ARM64.HasFPHP},
+ {Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
+ {Name: "cpuid", Feature: &ARM64.HasCPUID},
+ {Name: "asimdrdm", Feature: &ARM64.HasASIMDRDM},
+ {Name: "jscvt", Feature: &ARM64.HasJSCVT},
+ {Name: "fcma", Feature: &ARM64.HasFCMA},
+ {Name: "lrcpc", Feature: &ARM64.HasLRCPC},
+ {Name: "dcpop", Feature: &ARM64.HasDCPOP},
+ {Name: "sha3", Feature: &ARM64.HasSHA3},
+ {Name: "sm3", Feature: &ARM64.HasSM3},
+ {Name: "sm4", Feature: &ARM64.HasSM4},
+ {Name: "asimddp", Feature: &ARM64.HasASIMDDP},
+ {Name: "sha512", Feature: &ARM64.HasSHA512},
+ {Name: "sve", Feature: &ARM64.HasSVE},
+ {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
// These capabilities should always be enabled on arm64:
- // {"fp", &ARM64.HasFP},
- // {"asimd", &ARM64.HasASIMD},
+ {Name: "fp", Feature: &ARM64.HasFP, Required: true},
+ {Name: "asimd", Feature: &ARM64.HasASIMD, Required: true},
}
// HWCAP feature bits
diff --git a/src/internal/cpu/cpu_ppc64x.go b/src/internal/cpu/cpu_ppc64x.go
index 0195e663c6..f59bb9dc8d 100644
--- a/src/internal/cpu/cpu_ppc64x.go
+++ b/src/internal/cpu/cpu_ppc64x.go
@@ -34,17 +34,17 @@ const (
func doinit() {
options = []option{
- {"htm", &PPC64.HasHTM},
- {"htmnosc", &PPC64.HasHTMNOSC},
- {"darn", &PPC64.HasDARN},
- {"scv", &PPC64.HasSCV},
+ {Name: "htm", Feature: &PPC64.HasHTM},
+ {Name: "htmnosc", Feature: &PPC64.HasHTMNOSC},
+ {Name: "darn", Feature: &PPC64.HasDARN},
+ {Name: "scv", Feature: &PPC64.HasSCV},
// These capabilities should always be enabled on ppc64 and ppc64le:
- // {"vmx", &PPC64.HasVMX},
- // {"dfp", &PPC64.HasDFP},
- // {"vsx", &PPC64.HasVSX},
- // {"isel", &PPC64.HasISEL},
- // {"vcrypto", &PPC64.HasVCRYPTO},
+ {Name: "vmx", Feature: &PPC64.HasVMX, Required: true},
+ {Name: "dfp", Feature: &PPC64.HasDFP, Required: true},
+ {Name: "vsx", Feature: &PPC64.HasVSX, Required: true},
+ {Name: "isel", Feature: &PPC64.HasISEL, Required: true},
+ {Name: "vcrypto", Feature: &PPC64.HasVCRYPTO, Required: true},
}
// HWCAP feature bits
diff --git a/src/internal/cpu/cpu_s390x.go b/src/internal/cpu/cpu_s390x.go
index 23484b2950..eab77e6ee5 100644
--- a/src/internal/cpu/cpu_s390x.go
+++ b/src/internal/cpu/cpu_s390x.go
@@ -107,14 +107,14 @@ func klmdQuery() queryResult
func doinit() {
options = []option{
- {"zarch", &S390X.HasZArch},
- {"stfle", &S390X.HasSTFLE},
- {"ldisp", &S390X.HasLDisp},
- {"msa", &S390X.HasMSA},
- {"eimm", &S390X.HasEImm},
- {"dfp", &S390X.HasDFP},
- {"etf3eh", &S390X.HasETF3Enhanced},
- {"vx", &S390X.HasVX},
+ {Name: "zarch", Feature: &S390X.HasZArch},
+ {Name: "stfle", Feature: &S390X.HasSTFLE},
+ {Name: "ldisp", Feature: &S390X.HasLDisp},
+ {Name: "msa", Feature: &S390X.HasMSA},
+ {Name: "eimm", Feature: &S390X.HasEImm},
+ {Name: "dfp", Feature: &S390X.HasDFP},
+ {Name: "etf3eh", Feature: &S390X.HasETF3Enhanced},
+ {Name: "vx", Feature: &S390X.HasVX},
}
aes := []function{aes128, aes192, aes256}
diff --git a/src/internal/cpu/cpu_test.go b/src/internal/cpu/cpu_test.go
index d4115a1b87..46a351cfbc 100644
--- a/src/internal/cpu/cpu_test.go
+++ b/src/internal/cpu/cpu_test.go
@@ -13,14 +13,14 @@ import (
"testing"
)
-func MustHaveDebugOptionsEnabled(t *testing.T) {
+func MustHaveDebugOptionsSupport(t *testing.T) {
if !DebugOptions {
- t.Skipf("skipping test: cpu feature options not enabled")
+ t.Skipf("skipping test: cpu feature options not supported by OS")
}
}
func runDebugOptionsTest(t *testing.T, test string, options string) {
- MustHaveDebugOptionsEnabled(t)
+ MustHaveDebugOptionsSupport(t)
testenv.MustHaveExec(t)
@@ -30,7 +30,10 @@ func runDebugOptionsTest(t *testing.T, test string, options string) {
cmd.Env = append(cmd.Env, env)
output, err := cmd.CombinedOutput()
- got := strings.TrimSpace(string(output))
+ lines := strings.Fields(string(output))
+ lastline := lines[len(lines)-1]
+
+ got := strings.TrimSpace(lastline)
want := "PASS"
if err != nil || got != want {
t.Fatalf("%s with %s: want %s, got %v", test, env, want, got)
@@ -38,19 +41,20 @@ func runDebugOptionsTest(t *testing.T, test string, options string) {
}
func TestDisableAllCapabilities(t *testing.T) {
- runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "all=0")
+ runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "all=off")
}
func TestAllCapabilitiesDisabled(t *testing.T) {
- MustHaveDebugOptionsEnabled(t)
+ MustHaveDebugOptionsSupport(t)
- if os.Getenv("GODEBUGCPU") != "all=0" {
- t.Skipf("skipping test: GODEBUGCPU=all=0 not set")
+ if os.Getenv("GODEBUGCPU") != "all=off" {
+ t.Skipf("skipping test: GODEBUGCPU=all=off not set")
}
for _, o := range Options {
- if got := *o.Feature; got != false {
- t.Errorf("%v: expected false, got %v", o.Name, got)
+ want := o.Required
+ if got := *o.Feature; got != want {
+ t.Errorf("%v: expected %v, got %v", o.Name, want, got)
}
}
}
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go
index 0b00779a90..5d357be62b 100644
--- a/src/internal/cpu/cpu_x86.go
+++ b/src/internal/cpu/cpu_x86.go
@@ -40,28 +40,23 @@ const (
func doinit() {
options = []option{
- {"adx", &X86.HasADX},
- {"aes", &X86.HasAES},
- {"avx", &X86.HasAVX},
- {"avx2", &X86.HasAVX2},
- {"bmi1", &X86.HasBMI1},
- {"bmi2", &X86.HasBMI2},
- {"erms", &X86.HasERMS},
- {"fma", &X86.HasFMA},
- {"pclmulqdq", &X86.HasPCLMULQDQ},
- {"popcnt", &X86.HasPOPCNT},
- {"sse3", &X86.HasSSE3},
- {"sse41", &X86.HasSSE41},
- {"sse42", &X86.HasSSE42},
- {"ssse3", &X86.HasSSSE3},
+ {Name: "adx", Feature: &X86.HasADX},
+ {Name: "aes", Feature: &X86.HasAES},
+ {Name: "avx", Feature: &X86.HasAVX},
+ {Name: "avx2", Feature: &X86.HasAVX2},
+ {Name: "bmi1", Feature: &X86.HasBMI1},
+ {Name: "bmi2", Feature: &X86.HasBMI2},
+ {Name: "erms", Feature: &X86.HasERMS},
+ {Name: "fma", Feature: &X86.HasFMA},
+ {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+ {Name: "popcnt", Feature: &X86.HasPOPCNT},
+ {Name: "sse3", Feature: &X86.HasSSE3},
+ {Name: "sse41", Feature: &X86.HasSSE41},
+ {Name: "sse42", Feature: &X86.HasSSE42},
+ {Name: "ssse3", Feature: &X86.HasSSSE3},
- // sse2 set as last element so it can easily be removed again. See code below.
- {"sse2", &X86.HasSSE2},
- }
-
- // Remove sse2 from options on amd64(p32) because SSE2 is a mandatory feature for these GOARCHs.
- if GOARCH == "amd64" || GOARCH == "amd64p32" {
- options = options[:len(options)-1]
+ // These capabilities should always be enabled on amd64(p32):
+ {Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64" || GOARCH == "amd64p32"},
}
maxID, _, _, _ := cpuid(0, 0)
diff --git a/src/internal/cpu/cpu_x86_test.go b/src/internal/cpu/cpu_x86_test.go
index d03306c907..a8d0466e06 100644
--- a/src/internal/cpu/cpu_x86_test.go
+++ b/src/internal/cpu/cpu_x86_test.go
@@ -30,14 +30,14 @@ func TestX86ifAVX2hasAVX(t *testing.T) {
}
func TestDisableSSE2(t *testing.T) {
- runDebugOptionsTest(t, "TestSSE2DebugOption", "sse2=0")
+ runDebugOptionsTest(t, "TestSSE2DebugOption", "sse2=off")
}
func TestSSE2DebugOption(t *testing.T) {
- MustHaveDebugOptionsEnabled(t)
+ MustHaveDebugOptionsSupport(t)
- if os.Getenv("GODEBUGCPU") != "sse2=0" {
- t.Skipf("skipping test: GODEBUGCPU=sse2=0 not set")
+ if os.Getenv("GODEBUGCPU") != "sse2=off" {
+ t.Skipf("skipping test: GODEBUGCPU=sse2=off not set")
}
want := runtime.GOARCH != "386" // SSE2 can only be disabled on 386.
@@ -45,3 +45,20 @@ func TestSSE2DebugOption(t *testing.T) {
t.Errorf("X86.HasSSE2 on %s expected %v, got %v", runtime.GOARCH, want, got)
}
}
+
+func TestDisableSSE3(t *testing.T) {
+ runDebugOptionsTest(t, "TestSSE3DebugOption", "sse3=off")
+}
+
+func TestSSE3DebugOption(t *testing.T) {
+ MustHaveDebugOptionsSupport(t)
+
+ if os.Getenv("GODEBUGCPU") != "sse3=off" {
+ t.Skipf("skipping test: GODEBUGCPU=sse3=off not set")
+ }
+
+ want := false
+ if got := X86.HasSSE3; got != want {
+ t.Errorf("X86.HasSSE3 expected %v, got %v", want, got)
+ }
+}
diff --git a/src/internal/fmtsort/export_test.go b/src/internal/fmtsort/export_test.go
new file mode 100644
index 0000000000..25cbb5d4fc
--- /dev/null
+++ b/src/internal/fmtsort/export_test.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort
+
+import "reflect"
+
+func Compare(a, b reflect.Value) int {
+ return compare(a, b)
+}
diff --git a/src/internal/fmtsort/sort.go b/src/internal/fmtsort/sort.go
new file mode 100644
index 0000000000..c959cbee1f
--- /dev/null
+++ b/src/internal/fmtsort/sort.go
@@ -0,0 +1,216 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fmtsort provides a general stable ordering mechanism
+// for maps, on behalf of the fmt and text/template packages.
+// It is not guaranteed to be efficient and works only for types
+// that are valid map keys.
+package fmtsort
+
+import (
+ "reflect"
+ "sort"
+)
+
+// Note: Throughout this package we avoid calling reflect.Value.Interface as
+// it is not always legal to do so and it's easier to avoid the issue than to face it.
+
+// SortedMap represents a map's keys and values. The keys and values are
+// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
+type SortedMap struct {
+ Key []reflect.Value
+ Value []reflect.Value
+}
+
+func (o *SortedMap) Len() int { return len(o.Key) }
+func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
+func (o *SortedMap) Swap(i, j int) {
+ o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
+ o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
+}
+
+// Sort accepts a map and returns a SortedMap that has the same keys and
+// values but in a stable sorted order according to the keys, modulo issues
+// raised by unorderable key values such as NaNs.
+//
+// The ordering rules are more general than with Go's < operator:
+//
+// - when applicable, nil compares low
+// - ints, floats, and strings order by <
+// - NaN compares less than non-NaN floats
+// - bool compares false before true
+// - complex compares real, then imag
+// - pointers compare by machine address
+// - channel values compare by machine address
+// - structs compare each field in turn
+// - arrays compare each element in turn.
+// Otherwise identical arrays compare by length.
+// - interface values compare first by reflect.Type describing the concrete type
+// and then by concrete value as described in the previous rules.
+//
+func Sort(mapValue reflect.Value) *SortedMap {
+ if mapValue.Type().Kind() != reflect.Map {
+ return nil
+ }
+ key := make([]reflect.Value, mapValue.Len())
+ value := make([]reflect.Value, len(key))
+ iter := mapValue.MapRange()
+ for i := 0; iter.Next(); i++ {
+ key[i] = iter.Key()
+ value[i] = iter.Value()
+ }
+ sorted := &SortedMap{
+ Key: key,
+ Value: value,
+ }
+ sort.Stable(sorted)
+ return sorted
+}
+
+// compare compares two values of the same type. It returns -1, 0, or 1
+// according to whether a < b (-1), a == b (0), or a > b (1).
+// If the types differ, it returns -1.
+// See the comment on Sort for the comparison rules.
+func compare(aVal, bVal reflect.Value) int {
+ aType, bType := aVal.Type(), bVal.Type()
+ if aType != bType {
+ return -1 // No good answer possible, but don't return 0: they're not equal.
+ }
+ switch aVal.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ a, b := aVal.Int(), bVal.Int()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ a, b := aVal.Uint(), bVal.Uint()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.String:
+ a, b := aVal.String(), bVal.String()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Float32, reflect.Float64:
+ return floatCompare(aVal.Float(), bVal.Float())
+ case reflect.Complex64, reflect.Complex128:
+ a, b := aVal.Complex(), bVal.Complex()
+ if c := floatCompare(real(a), real(b)); c != 0 {
+ return c
+ }
+ return floatCompare(imag(a), imag(b))
+ case reflect.Bool:
+ a, b := aVal.Bool(), bVal.Bool()
+ switch {
+ case a == b:
+ return 0
+ case a:
+ return 1
+ default:
+ return -1
+ }
+ case reflect.Ptr:
+ a, b := aVal.Pointer(), bVal.Pointer()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Chan:
+ if c, ok := nilCompare(aVal, bVal); ok {
+ return c
+ }
+ ap, bp := aVal.Pointer(), bVal.Pointer()
+ switch {
+ case ap < bp:
+ return -1
+ case ap > bp:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Struct:
+ for i := 0; i < aVal.NumField(); i++ {
+ if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
+ return c
+ }
+ }
+ return 0
+ case reflect.Array:
+ for i := 0; i < aVal.Len(); i++ {
+ if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
+ return c
+ }
+ }
+ return 0
+ case reflect.Interface:
+ if c, ok := nilCompare(aVal, bVal); ok {
+ return c
+ }
+ c := compare(reflect.ValueOf(aType), reflect.ValueOf(bType))
+ if c != 0 {
+ return c
+ }
+ return compare(aVal.Elem(), bVal.Elem())
+ default:
+ // Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
+ panic("bad type in compare: " + aType.String())
+ }
+}
+
+// nilCompare checks whether either value is nil. If not, the boolean is false.
+// If either value is nil, the boolean is true and the integer is the comparison
+// value. The comparison is defined to be 0 if both are nil, otherwise the one
+// nil value compares low. Both arguments must represent a chan, func,
+// interface, map, pointer, or slice.
+func nilCompare(aVal, bVal reflect.Value) (int, bool) {
+ if aVal.IsNil() {
+ if bVal.IsNil() {
+ return 0, true
+ }
+ return -1, true
+ }
+ if bVal.IsNil() {
+ return 1, true
+ }
+ return 0, false
+}
+
+// floatCompare compares two floating-point values. NaNs compare low.
+func floatCompare(a, b float64) int {
+ switch {
+ case isNaN(a):
+ return -1 // No good answer if b is a NaN so don't bother checking.
+ case isNaN(b):
+ return 1
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ }
+ return 0
+}
+
+func isNaN(a float64) bool {
+ return a != a
+}
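A usage sketch (fmtsort is an internal package, so only the standard library can import it; values here are illustrative):

    package main

    import (
        "fmt"
        "internal/fmtsort"
        "reflect"
    )

    func main() {
        m := map[string]int{"b": 2, "a": 1, "c": 3}
        sm := fmtsort.Sort(reflect.ValueOf(m))
        for i, k := range sm.Key {
            fmt.Printf("%v=%v ", k, sm.Value[i]) // a=1 b=2 c=3, stable key order
        }
    }

This is essentially how fmt prints maps deterministically: sort once, then walk Key and Value in parallel.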
diff --git a/src/internal/fmtsort/sort_test.go b/src/internal/fmtsort/sort_test.go
new file mode 100644
index 0000000000..6b10c775b0
--- /dev/null
+++ b/src/internal/fmtsort/sort_test.go
@@ -0,0 +1,212 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort_test
+
+import (
+ "fmt"
+ "internal/fmtsort"
+ "math"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var compareTests = [][]reflect.Value{
+ ct(reflect.TypeOf(int(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int8(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int16(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int32(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int64(0)), -1, 0, 1),
+ ct(reflect.TypeOf(uint(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint8(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint16(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint32(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint64(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uintptr(0)), 0, 1, 5),
+ ct(reflect.TypeOf(string("")), "", "a", "ab"),
+ ct(reflect.TypeOf(float32(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+ ct(reflect.TypeOf(float64(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+ ct(reflect.TypeOf(complex64(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+ ct(reflect.TypeOf(complex128(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+ ct(reflect.TypeOf(false), false, true),
+ ct(reflect.TypeOf(&ints[0]), &ints[0], &ints[1], &ints[2]),
+ ct(reflect.TypeOf(chans[0]), chans[0], chans[1], chans[2]),
+ ct(reflect.TypeOf(toy{}), toy{0, 1}, toy{0, 2}, toy{1, -1}, toy{1, 1}),
+ ct(reflect.TypeOf([2]int{}), [2]int{1, 1}, [2]int{1, 2}, [2]int{2, 0}),
+ ct(reflect.TypeOf(interface{}(interface{}(0))), iFace, 1, 2, 3),
+}
+
+var iFace interface{}
+
+func ct(typ reflect.Type, args ...interface{}) []reflect.Value {
+ value := make([]reflect.Value, len(args))
+ for i, v := range args {
+ x := reflect.ValueOf(v)
+ if !x.IsValid() { // Make it a typed nil.
+ x = reflect.Zero(typ)
+ } else {
+ x = x.Convert(typ)
+ }
+ value[i] = x
+ }
+ return value
+}
+
+func TestCompare(t *testing.T) {
+ for _, test := range compareTests {
+ for i, v0 := range test {
+ for j, v1 := range test {
+ c := fmtsort.Compare(v0, v1)
+ var expect int
+ switch {
+ case i == j:
+ expect = 0
+ // NaNs are tricky.
+ if typ := v0.Type(); (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && math.IsNaN(v0.Float()) {
+ expect = -1
+ }
+ case i < j:
+ expect = -1
+ case i > j:
+ expect = 1
+ }
+ if c != expect {
+ t.Errorf("%s: compare(%v,%v)=%d; expect %d", v0.Type(), v0, v1, c, expect)
+ }
+ }
+ }
+ }
+}
+
+type sortTest struct {
+ data interface{} // Always a map.
+ print string // Printed result using our custom printer.
+}
+
+var sortTests = []sortTest{
+ {
+ map[int]string{7: "bar", -3: "foo"},
+ "-3:foo 7:bar",
+ },
+ {
+ map[uint8]string{7: "bar", 3: "foo"},
+ "3:foo 7:bar",
+ },
+ {
+ map[string]string{"7": "bar", "3": "foo"},
+ "3:foo 7:bar",
+ },
+ {
+ map[float64]string{7: "bar", -3: "foo", math.NaN(): "nan", math.Inf(0): "inf"},
+ "NaN:nan -3:foo 7:bar +Inf:inf",
+ },
+ {
+ map[complex128]string{7 + 2i: "bar2", 7 + 1i: "bar", -3: "foo", complex(math.NaN(), 0i): "nan", complex(math.Inf(0), 0i): "inf"},
+ "(NaN+0i):nan (-3+0i):foo (7+1i):bar (7+2i):bar2 (+Inf+0i):inf",
+ },
+ {
+ map[bool]string{true: "true", false: "false"},
+ "false:false true:true",
+ },
+ {
+ chanMap(),
+ "CHAN0:0 CHAN1:1 CHAN2:2",
+ },
+ {
+ pointerMap(),
+ "PTR0:0 PTR1:1 PTR2:2",
+ },
+ {
+ map[toy]string{toy{7, 2}: "72", toy{7, 1}: "71", toy{3, 4}: "34"},
+ "{3 4}:34 {7 1}:71 {7 2}:72",
+ },
+ {
+ map[[2]int]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+ "[3 4]:34 [7 1]:71 [7 2]:72",
+ },
+ {
+ map[interface{}]string{7: "7", 4: "4", 3: "3", nil: "nil"},
+ ":nil 3:3 4:4 7:7",
+ },
+}
+
+func sprint(data interface{}) string {
+ om := fmtsort.Sort(reflect.ValueOf(data))
+ if om == nil {
+ return "nil"
+ }
+ b := new(strings.Builder)
+ for i, key := range om.Key {
+ if i > 0 {
+ b.WriteRune(' ')
+ }
+ b.WriteString(sprintKey(key))
+ b.WriteRune(':')
+ b.WriteString(fmt.Sprint(om.Value[i]))
+ }
+ return b.String()
+}
+
+// sprintKey formats a reflect.Value but gives reproducible values for some
+// problematic types such as pointers. Note that it only does special handling
+// for the troublesome types used in the test cases; it is not a general
+// printer.
+func sprintKey(key reflect.Value) string {
+ switch str := key.Type().String(); str {
+ case "*int":
+ ptr := key.Interface().(*int)
+ for i := range ints {
+ if ptr == &ints[i] {
+ return fmt.Sprintf("PTR%d", i)
+ }
+ }
+ return "PTR???"
+ case "chan int":
+ c := key.Interface().(chan int)
+ for i := range chans {
+ if c == chans[i] {
+ return fmt.Sprintf("CHAN%d", i)
+ }
+ }
+ return "CHAN???"
+ default:
+ return fmt.Sprint(key)
+ }
+}
+
+var (
+ ints [3]int
+ chans = [3]chan int{make(chan int), make(chan int), make(chan int)}
+)
+
+func pointerMap() map[*int]string {
+ m := make(map[*int]string)
+ for i := 2; i >= 0; i-- {
+ m[&ints[i]] = fmt.Sprint(i)
+ }
+ return m
+}
+
+func chanMap() map[chan int]string {
+ m := make(map[chan int]string)
+ for i := 2; i >= 0; i-- {
+ m[chans[i]] = fmt.Sprint(i)
+ }
+ return m
+}
+
+type toy struct {
+ A int // Exported.
+ b int // Unexported.
+}
+
+func TestOrder(t *testing.T) {
+ for _, test := range sortTests {
+ got := sprint(test.data)
+ if got != test.print {
+ t.Errorf("%s: got %q, want %q", reflect.TypeOf(test.data), got, test.print)
+ }
+ }
+}
diff --git a/src/internal/poll/fd_fsync_posix.go b/src/internal/poll/fd_fsync_posix.go
index 943f59a9ab..30dde0720b 100644
--- a/src/internal/poll/fd_fsync_posix.go
+++ b/src/internal/poll/fd_fsync_posix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows
+// +build aix dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows
package poll
diff --git a/src/internal/syscall/unix/asm_aix_ppc64.s b/src/internal/syscall/unix/asm_aix_ppc64.s
new file mode 100644
index 0000000000..9e82e3eb88
--- /dev/null
+++ b/src/internal/syscall/unix/asm_aix_ppc64.s
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System calls for aix/ppc64 are implemented in syscall/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0
+ JMP syscall·syscall6(SB)
diff --git a/src/internal/syscall/unix/ioctl_aix.go b/src/internal/syscall/unix/ioctl_aix.go
new file mode 100644
index 0000000000..19d56c36a1
--- /dev/null
+++ b/src/internal/syscall/unix/ioctl_aix.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o"
+//go:linkname libc_ioctl libc_ioctl
+var libc_ioctl uintptr
+
+// Implemented in syscall/syscall_aix.go.
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func Ioctl(fd int, cmd int, args uintptr) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(cmd), uintptr(args), 0, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/internal/traceparser/consistent.go b/src/internal/traceparser/consistent.go
new file mode 100644
index 0000000000..70fbd04b3f
--- /dev/null
+++ b/src/internal/traceparser/consistent.go
@@ -0,0 +1,313 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+// postProcess is a final check of consistency, and if all is well,
+// adds links to Events
+
+import (
+ "fmt"
+)
+
+type gStatus int
+
+const (
+ gDead gStatus = iota
+ gRunnable
+ gRunning
+ gWaiting
+)
+
+// This code is copied from internal/trace/parser.go. With greater understanding it could be
+// simplified. It sets ev.P for GCStart events and sets various Link fields.
+func (p *Parsed) postProcess(events []*Event) error {
+ type gdesc struct {
+ state gStatus
+ ev *Event
+ evStart *Event
+ evCreate *Event
+ evMarkAssist *Event
+ }
+ type pdesc struct {
+ running bool
+ g uint64
+ evSTW *Event
+ evSweep *Event
+ }
+
+ gs := make(map[uint64]gdesc)
+ ps := make(map[int]pdesc)
+ tasks := make(map[uint64]*Event) // task id to task creation events
+ activeRegions := make(map[uint64][]*Event) // goroutine id to stack of spans
+ gs[0] = gdesc{state: gRunning}
+ var evGC, evSTW *Event
+
+ checkRunning := func(pd pdesc, g gdesc, ev *Event, allowG0 bool) error {
+ if g.state != gRunning {
+ return fmt.Errorf("saw %v, but g %d is not running", ev, ev.G)
+ }
+ if pd.g != ev.G {
+ return fmt.Errorf("saw %v, but it's P is running %d, not %d", ev, pd.g, ev.G)
+ }
+ if !allowG0 && ev.G == 0 {
+ return fmt.Errorf("saw %v with unexpected g==0", ev)
+ }
+ return nil
+ }
+ for i, ev := range events {
+ g := gs[ev.G]
+ px := ps[int(ev.P)]
+ switch ev.Type {
+ case EvProcStart:
+ if px.running {
+ return fmt.Errorf("%d: running before start %s", i, ev)
+ }
+ px.running = true
+ case EvProcStop:
+ if !px.running {
+ return fmt.Errorf("%d: p %d not running %s", i, ev.P, ev)
+ }
+ if px.g != 0 {
+ return fmt.Errorf("p %d is running a goroutine %s", ev.P, ev)
+ }
+ px.running = false
+ case EvGCStart:
+ if evGC != nil {
+ return fmt.Errorf("GC already running %s, was %s", ev, evGC)
+ }
+ evGC = ev
+ // Attribute this to the global GC state.
+ ev.P = GCP
+ case EvGCDone:
+ if evGC == nil {
+ return fmt.Errorf("%d:%s bogus GC end", i, ev)
+ }
+ evGC.Link = ev
+ evGC = nil
+ case EvGCSTWStart:
+ evp := &evSTW
+ if p.Version < 1010 {
+ // Before 1.10, EvGCSTWStart was per-P.
+ evp = &px.evSTW
+ }
+ if *evp != nil {
+ return fmt.Errorf("STW %s still running at %s", *evp, ev)
+ }
+ *evp = ev
+ case EvGCSTWDone:
+ evp := &evSTW
+ if p.Version < 1010 {
+ // Before 1.10, EvGCSTWDone was per-P.
+ evp = &px.evSTW
+ }
+ if *evp == nil {
+ return fmt.Errorf("%d: no STW running %s", i, ev)
+ }
+ (*evp).Link = ev
+ *evp = nil
+ case EvGCMarkAssistStart:
+ if g.evMarkAssist != nil {
+ return fmt.Errorf("%d: MarkAssist %s is still running at %s",
+ i, g.evMarkAssist, ev)
+ }
+ g.evMarkAssist = ev
+ case EvGCMarkAssistDone:
+ // Unlike most events, mark assists can be in progress when a
+ // goroutine starts tracing, so we can't report an error here.
+ if g.evMarkAssist != nil {
+ g.evMarkAssist.Link = ev
+ g.evMarkAssist = nil
+ }
+ case EvGCSweepStart:
+ if px.evSweep != nil {
+ return fmt.Errorf("sweep not done %d: %s", i, ev)
+ }
+ px.evSweep = ev
+ case EvGCSweepDone:
+ if px.evSweep == nil {
+ return fmt.Errorf("%d: no sweep happening %s", i, ev)
+ }
+ px.evSweep.Link = ev
+ px.evSweep = nil
+ case EvGoWaiting:
+ if g.state != gRunnable {
+ return fmt.Errorf("not runnable before %d:%s", i, ev)
+ }
+ g.state = gWaiting
+ g.ev = ev
+ case EvGoInSyscall:
+ if g.state != gRunnable {
+ return fmt.Errorf("not runnable before %d:%s", i, ev)
+ }
+ g.state = gWaiting
+ g.ev = ev
+ case EvGoCreate:
+ if err := checkRunning(px, g, ev, true); err != nil {
+ return err
+ }
+ if _, ok := gs[ev.Args[0]]; ok {
+ return fmt.Errorf("%d: already exists %s", i, ev)
+ }
+ gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
+ case EvGoStart, EvGoStartLabel:
+ if g.state != gRunnable {
+ return fmt.Errorf("not runnable before start %d:%s %+v", i, ev, g)
+ }
+ if px.g != 0 {
+ return fmt.Errorf("%d: %s has p running %d already %v", i, ev, px.g, px)
+ }
+ g.state = gRunning
+ g.evStart = ev
+ px.g = ev.G
+ if g.evCreate != nil {
+ if p.Version < 1007 {
+ // +1 because symbolizer expects return pc.
+ //PJW: aren't doing < 1007. ev.stk = []*Frame{{PC: g.evCreate.args[1] + 1}}
+ } else {
+ ev.StkID = uint32(g.evCreate.Args[1])
+ }
+ g.evCreate = nil
+ }
+
+ if g.ev != nil {
+ g.ev.Link = ev
+ g.ev = nil
+ }
+ case EvGoEnd, EvGoStop:
+ if err := checkRunning(px, g, ev, false); err != nil {
+ return fmt.Errorf("%d: %s", i, err)
+ }
+ g.evStart.Link = ev
+ g.evStart = nil
+ g.state = gDead
+ px.g = 0
+
+ if ev.Type == EvGoEnd { // flush all active Regions
+ spans := activeRegions[ev.G]
+ for _, s := range spans {
+ s.Link = ev
+ }
+ delete(activeRegions, ev.G)
+ }
+ case EvGoSched, EvGoPreempt:
+ if err := checkRunning(px, g, ev, false); err != nil {
+ return err
+ }
+ g.state = gRunnable
+ g.evStart.Link = ev
+ g.evStart = nil
+ px.g = 0
+ g.ev = ev
+ case EvGoUnblock:
+ if g.state != gRunning { // PJW, error message
+ return fmt.Errorf("event %d (%s) is not running at unblock %s", i, ev, g.state)
+ }
+ if ev.P != TimerP && px.g != ev.G {
+ // PJW: do better here.
+ return fmt.Errorf("%d: %s p %d is not running g", i, ev, px.g)
+ }
+ g1 := gs[ev.Args[0]]
+ if g1.state != gWaiting {
+ return fmt.Errorf("g %v is not waiting before unpark i=%d g1=%v %s",
+ ev.Args[0], i, g1, ev)
+ }
+ if g1.ev != nil && g1.ev.Type == EvGoBlockNet && ev.P != TimerP {
+ ev.P = NetpollP
+ }
+ if g1.ev != nil {
+ g1.ev.Link = ev
+ }
+ g1.state = gRunnable
+ g1.ev = ev
+ gs[ev.Args[0]] = g1
+ case EvGoSysCall:
+ if err := checkRunning(px, g, ev, false); err != nil {
+ return err
+ }
+ g.ev = ev
+ case EvGoSysBlock:
+ if err := checkRunning(px, g, ev, false); err != nil {
+ return err
+ }
+ g.state = gWaiting
+ g.evStart.Link = ev
+ g.evStart = nil
+ px.g = 0
+ case EvGoSysExit:
+ if g.state != gWaiting {
+ return fmt.Errorf("not waiting when %s", ev)
+ }
+ if g.ev != nil && g.ev.Type == EvGoSysCall {
+ g.ev.Link = ev
+ }
+ g.state = gRunnable
+ g.ev = ev
+ case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
+ EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoBlockGC:
+ if err := checkRunning(px, g, ev, false); err != nil {
+ return err
+ }
+ g.state = gWaiting
+ g.ev = ev
+ g.evStart.Link = ev
+ g.evStart = nil
+ px.g = 0
+ case EvUserTaskCreate:
+ taskid := ev.Args[0]
+ if prevEv, ok := tasks[taskid]; ok {
+ return fmt.Errorf("task id conflicts (id:%d), %q vs %q", taskid, ev, prevEv)
+ }
+ tasks[ev.Args[0]] = ev
+ case EvUserTaskEnd:
+ if prevEv, ok := tasks[ev.Args[0]]; ok {
+ prevEv.Link = ev
+ ev.Link = prevEv
+ }
+ case EvUserRegion:
+ mode := ev.Args[1]
+ spans := activeRegions[ev.G]
+ if mode == 0 { // span start
+ activeRegions[ev.G] = append(spans, ev) // push
+ } else if mode == 1 { // span end
+ n := len(spans)
+ if n > 0 { // matching span start event is in the trace.
+ s := spans[n-1]
+ if s.Args[0] != ev.Args[0] || s.SArgs[0] != ev.SArgs[0] { // task id, span name mismatch
+ return fmt.Errorf("misuse of span in goroutine %d: span end %q when the inner-most active span start event is %q",
+ ev.G, ev, s)
+ }
+ // Link span start event with span end event
+ s.Link = ev
+ ev.Link = s
+
+ if n > 1 {
+ activeRegions[ev.G] = spans[:n-1]
+ } else {
+ delete(activeRegions, ev.G)
+ }
+ }
+ } else {
+ return fmt.Errorf("invalid user region, mode: %q", ev)
+ }
+ }
+ gs[ev.G] = g
+ ps[int(ev.P)] = px
+ }
+ return nil
+}
+func (g gStatus) String() string {
+ switch g {
+ case gDead:
+ return "gDead"
+ case gRunnable:
+ return "gRunnable"
+ case gRunning:
+ return "gRunning"
+ case gWaiting:
+ return "gWaiting"
+ }
+ return fmt.Sprintf("gStatus?%d", g)
+}
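A sketch of how a client of this package might use the Link fields that postProcess establishes (assumes the Parsed and Event types from this package; hypothetical snippet):

    // Pair each garbage collection with its end event and compute its duration.
    for _, ev := range p.Events {
        if ev.Type == EvGCStart && ev.Link != nil {
            gcNanos := ev.Link.Ts - ev.Ts // Link points at the matching EvGCDone
            fmt.Printf("GC at %d took %dns\n", ev.Ts, gcNanos)
        }
    }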
diff --git a/src/internal/traceparser/events.go b/src/internal/traceparser/events.go
new file mode 100644
index 0000000000..00451f37b5
--- /dev/null
+++ b/src/internal/traceparser/events.go
@@ -0,0 +1,312 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+import (
+ "fmt"
+ "sort"
+)
+
+// convert raw events into Events
+
+func (p *Parsed) createEvents(f func(string)) error {
+ // multiple passes:
+ // create some Events
+ // sort them by time (and adjust their times to be nanonseconds)
+ // remove events not in the desired time interval
+ // make the events consistent (adding initializing events at the beginning)
+ // remove the futile events
+ // add links (and do final checking)
+
+ // shared by firstEvents
+ p.byproc = make(map[int][]*Event)
+ p.lastGs = make(map[int]uint64)
+
+ // p.batches are always sorted by time. Otherwise a batch for one p that is totally
+ // later than another batch might be done first, confusing us about g's
+ for i, b := range p.batches {
+ if b.raws == nil {
+ continue
+ }
+ if err := p.firstEvents(b); err != nil {
+ return fmt.Errorf("%v", err) // PJW: this is not useful
+ }
+ // we're done with b.raws now
+ p.batches[i].raws = nil
+ }
+ f("firstEvents finished")
+ sorted := []*Event{}
+ for _, v := range p.byproc {
+ sorted = append(sorted, v...)
+ }
+ // PJW: are we done with p.byproc now? Yes. This shrinks a little.
+ p.byproc = nil
+ // Why wasn't this done earlier? Or, why do it at all?
+ for _, ev := range sorted {
+ switch ev.Type {
+ case EvGoStartLocal:
+ ev.Type = EvGoStart
+ case EvGoUnblockLocal:
+ ev.Type = EvGoUnblock
+ case EvGoSysExitLocal:
+ ev.Type = EvGoSysExit
+ }
+ }
+ // change to nanoseconds
+ freq := 1e9 / float64(p.TicksPerSec)
+ for i, ev := range sorted {
+ // Move timers and syscalls to separate fake Ps.
+ // This could be done in the loop at line 38
+ // or maybe after robust fixes things.
+ if p.timerGoids[ev.G] && ev.Type == EvGoUnblock {
+ ev.Args[2] = uint64(ev.P) // save for robust() to use
+ ev.P = TimerP
+ }
+ // sometimes the ts is not what it should be
+ if ev.Type == EvGoSysExit {
+ ev.P = SyscallP
+ if ev.Args[2] != 0 {
+ // PJW: test for this being safe. There might be no preceding
+ // EvSysBlock, EvGoInSyscall, or its time might be later than this
+ ev.Ts = int64(ev.Args[2])
+ }
+ }
+ if ev.Type == EvGCStart {
+ ev.P = GCP
+ }
+ t := ev.Ts - p.minticks
+ if t < 0 {
+ return fmt.Errorf("event %d %s would be %d mints=%x", i, ev, t, p.minticks)
+ }
+ ev.Ts = int64(float64(ev.Ts-p.minticks) * freq)
+ }
+ // Stable for the case of equal Ts's.
+ sort.SliceStable(sorted, func(i, j int) bool { return sorted[i].Ts < sorted[j].Ts })
+ f("sorted")
+ // and ignore the ones with times out of bounds
+ firstwant, lastwant := 0, len(sorted)
+ for i, ev := range sorted {
+ if ev.Ts < p.MinWant {
+ firstwant = i + 1
+ } else if ev.Ts > p.MaxWant { // closed interval [minwant, maxwant]
+ lastwant = i
+ break // sorted by Ts
+ }
+ }
+ f("nanoseconds")
+ var err error
+ sorted, _, err = p.robust(sorted[firstwant:lastwant]) // PJW: copy info from aux
+ f("consistent")
+ if err != nil {
+ return err
+ }
+ events, cnt := p.removeFutile(sorted) // err is always nil here.
+ f(fmt.Sprintf("removed %d futiles", cnt))
+ // and finally, do some checks and put in links
+ err = p.postProcess(events)
+ f("post processed")
+ if err != nil {
+ return err // PJW: is this enough? NO
+ }
+ p.Events = events
+ return nil
+}
+
+// Special P identifiers.
+const (
+ FakeP = 1000000 + iota
+ TimerP // depicts timer unblocks
+ NetpollP // depicts network unblocks
+ SyscallP // depicts returns from syscalls
+ GCP // depicts GC state
+)
+
+// convert the raw events for a batch into Events, and keep track of
+// which G is running on the P that is common to the batch.
+func (p *Parsed) firstEvents(b batch) error {
+ for _, raw := range b.raws {
+ desc := EventDescriptions[raw.typ]
+ narg := p.rawArgNum(&raw)
+ if p.Err != nil {
+ return p.Err
+ }
+ if raw.typ == EvBatch {
+ // first event, record information about P, G, and Ts
+ p.lastGs[p.lastP] = p.lastG // 0 the first time through
+ p.lastP = int(raw.Arg(0))
+ p.lastG = p.lastGs[p.lastP]
+ p.lastTs = int64(raw.Arg(1))
+ continue
+ }
+ e := &Event{Type: raw.typ, P: int32(p.lastP), G: p.lastG}
+ var argoffset int
+ if p.Version < 1007 { // can't happen.
+ e.Ts = p.lastTs + int64(raw.Arg(1))
+ argoffset = 2
+ } else {
+ e.Ts = p.lastTs + int64(raw.Arg(0))
+ argoffset = 1
+ }
+ p.lastTs = e.Ts
+ // collect the args for the raw event e
+ for i := argoffset; i < narg; i++ {
+ // evade one byte of corruption (from fuzzing typically)
+ if raw.args == nil {
+ return fmt.Errorf("raw.args is nil %s", evname(raw.typ))
+ }
+ if i > 0 && i-1 >= len(*raw.args) {
+ return fmt.Errorf("%s wants arg %d of %d", evname(raw.typ), i, len(*raw.args))
+ }
+ if i == narg-1 && desc.Stack {
+ e.StkID = uint32(raw.Arg(i))
+ } else {
+ e.Args[i-argoffset] = raw.Arg(i)
+ }
+ }
+ switch raw.typ {
+ case EvGoSysCall, EvGCSweepDone, EvGCSweepStart:
+ if e.G == 0 {
+ // missing some earlier G's from this P
+ continue // so we don't know which G is doing it
+ }
+ case EvGoStart, EvGoStartLocal, EvGoStartLabel:
+ p.lastG = e.Args[0]
+ e.G = p.lastG
+ if raw.typ == EvGoStartLabel {
+ e.SArgs = []string{p.Strings[e.Args[2]]}
+ }
+ case EvGCSTWStart:
+ e.G = 0
+ switch e.Args[0] {
+ case 0:
+ e.SArgs = []string{"mark termination"}
+ case 1:
+ e.SArgs = []string{"sweep termination"}
+ default:
+ return fmt.Errorf("unknown STW kind %d!=0,1 %s", e.Args[0], e)
+ }
+ case EvGCStart, EvGCDone, EvGCSTWDone:
+ e.G = 0
+ case EvGoEnd, EvGoStop, EvGoSched, EvGoPreempt,
+ EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
+ EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet,
+ EvGoSysBlock, EvGoBlockGC:
+ p.lastG = 0
+ if e.G == 0 {
+ // missing some earlier G's from this P
+ continue // so we don't know which G is doing it
+ }
+ case EvGoSysExit, EvGoWaiting, EvGoInSyscall:
+ e.G = e.Args[0]
+ case EvUserTaskCreate:
+ // e.Args 0: taskID, 1:parentID, 2:nameID
+ e.SArgs = []string{p.Strings[e.Args[2]]}
+ case EvUserRegion:
+ if e.G == 0 {
+ continue // don't know its G
+ }
+ // e.Args 0: taskID, 1: mode, 2:nameID
+ e.SArgs = []string{p.Strings[e.Args[2]]}
+ case EvUserLog:
+ // e.Args 0: taskID, 1:keyID, 2: stackID
+ e.SArgs = []string{p.Strings[e.Args[1]], raw.sarg}
+ }
+ p.byproc[p.lastP] = append(p.byproc[p.lastP], e)
+ }
+ return nil
+}
+
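+// removeFutile removes futile wakeups: sequences where a goroutine is
+// unblocked and started but blocks again without doing useful work,
+// which the runtime marks with EvFutileWakeup. It returns the surviving
+// events and a count of the blocking events it examined.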
+func (p *Parsed) removeFutile(events []*Event) ([]*Event, int) {
+ // Phase 1: determine futile wakeup sequences.
+ type G struct {
+ futile bool
+ wakeup []*Event // wakeup sequence (subject for removal)
+ }
+ gs := make(map[uint64]G)
+ futile := make(map[*Event]bool)
+ cnt := 0
+ for _, ev := range events {
+ switch ev.Type {
+ case EvGoUnblock:
+ g := gs[ev.Args[0]]
+ g.wakeup = []*Event{ev}
+ gs[ev.Args[0]] = g
+ case EvGoStart, EvGoPreempt, EvFutileWakeup:
+ g := gs[ev.G]
+ g.wakeup = append(g.wakeup, ev)
+ if ev.Type == EvFutileWakeup {
+ g.futile = true
+ }
+ gs[ev.G] = g
+ case EvGoBlock, EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
+ EvGoBlockSync, EvGoBlockCond:
+ g := gs[ev.G]
+ if g.futile {
+ futile[ev] = true
+ for _, ev1 := range g.wakeup {
+ futile[ev1] = true
+ }
+ }
+ delete(gs, ev.G)
+ cnt++
+ }
+ }
+ // Phase 2: remove futile wakeup sequences.
+ newEvents := events[:0] // overwrite the original slice
+ for _, ev := range events {
+ if !futile[ev] {
+ newEvents = append(newEvents, ev)
+ }
+ }
+ return newEvents, cnt // PJW: cnt doesn't count the futile[]s
+}
+
+// Arg gets the n-th arg from a raw event
+func (r *rawEvent) Arg(n int) uint64 {
+ if n == 0 {
+ return r.arg0
+ }
+ return (*r.args)[n-1]
+}
+
+// rawArgNum reports the number of arguments for a raw event, allowing
+// for historical differences between trace versions.
+func (p *Parsed) rawArgNum(ev *rawEvent) int {
+ desc := EventDescriptions[ev.typ]
+ switch ev.typ {
+ case EvStack, EvFrequency, EvTimerGoroutine:
+ p.Err = fmt.Errorf("%s unexpected in rawArgNum", evname(ev.typ))
+ return 0
+ }
+ narg := len(desc.Args)
+ if desc.Stack {
+ narg++
+ }
+ if ev.typ == EvBatch {
+ if p.Version < 1007 {
+ narg++ // used to be an extra unused arg
+ }
+ return narg
+ }
+ narg++ // timestamp
+ if p.Version < 1007 {
+ narg++ // sequence
+ }
+ // various special historical cases
+ switch ev.typ {
+ case EvGCSweepDone:
+ if p.Version < 1009 {
+ narg -= 2 // 1.9 added 2 args
+ }
+ case EvGCStart, EvGoStart, EvGoUnblock:
+ if p.Version < 1007 {
+ narg-- // one more since 1.7
+ }
+ case EvGCSTWStart:
+ if p.Version < 1010 {
+ narg-- // 1.10 added an argument
+ }
+ }
+ return narg
+}
diff --git a/src/internal/traceparser/file.go b/src/internal/traceparser/file.go
new file mode 100644
index 0000000000..4671edafb2
--- /dev/null
+++ b/src/internal/traceparser/file.go
@@ -0,0 +1,247 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// scan the trace file finding the header, starts of batches, and the trailer.
+// the trailer contains strings, stacks, and the clock frequency
+
+// There are two ways of thinking about the raw trace file. It starts with a
+// 16-byte header, e.g. "go 1.11 trace\0\0\0".
+// From the point of view of the runtime, the header is followed by a
+// collection of initializations, one per goroutine: an EvGoCreate, possibly
+// followed by EvGoWaiting or EvGoInSyscall if the goroutine is waiting or in
+// a syscall. Then there is an EvProcStart, so there is a running P, and an
+// EvGoStart for the first running goroutine. From then on, the runtime emits
+// trace events as the program runs. Finally, when tracing stops, the runtime
+// emits a footer consisting of an EvFrequency (to convert ticks to
+// nanoseconds) and some EvTimerGoroutines, followed by EvStacks for all the
+// stack frames.
+//
+// In the file, the header and footer are as described, but the events in
+// between come in batches, each headed by an EvBatch and sharing its P, and
+// have to be rearranged into timestamp order.
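+//
+// Schematically:
+//
+//	header | batch | batch | ... | batch | footer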
+
+// New() scans once through the file to find the beginnings of all the batches (EvBatch) and
+// processes the footer extracting the strings and stacks.
+// Parse() finds the batches that overlap the desired time interval, and processes them into
+// events, dropping those outside the desired time interval. But it has to derive the missing
+// initializations from the events it sees, as it has no other access to the state of the runtime.
+// This is done in robust.go.
+
+// In more detail, scanFile() is called by commonInit() which is called by either New() or ParseBuffer().
+// It extracts the strings, the stacks, and remembers the locations of the Batches (all saved in *Parsed).
+
+// Parse first computes the rawEvents for the batches that overlap the requested interval.
+// It then calls createEvents() (events.go) which produces Events.
+
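+// parseHeader checks the 16-byte header and extracts the trace version.
+// For example, "go 1.11 trace\0\0\0" parses to Version 1011
+// (major*1000 + minor).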
+func (p *Parsed) parseHeader() error {
+ p.r.Seek(0, 0)
+ var buf [16]byte
+ n, err := p.r.Read(buf[:])
+ if n != 16 || err != nil {
+ return fmt.Errorf("failed to red header: read %d bytes, not 16 %v", n, err)
+ }
+ // by hand. there are only 6 or so legitimate values; we could search for a match
+ if buf[0] != 'g' || buf[1] != 'o' || buf[2] != ' ' ||
+ buf[3] < '1' || buf[3] > '9' ||
+ buf[4] != '.' ||
+ buf[5] < '1' || buf[5] > '9' {
+ return fmt.Errorf("not a trace file")
+ }
+ ver := int(buf[5] - '0')
+ i := 0
+ for ; buf[6+i] >= '0' && buf[6+i] <= '9' && i < 2; i++ {
+ ver = ver*10 + int(buf[6+i]-'0')
+ }
+ ver += int(buf[3]-'0') * 1000
+ if !bytes.Equal(buf[6+i:], []byte(" trace\x00\x00\x00\x00")[:10-i]) {
+ return fmt.Errorf("not a trace file")
+ }
+ p.Version = ver
+ // PJW: reject 1005 and 1007? They need symbolization, which we don't do.
+ // Further, doing these would require 1.7 or earlier binaries.
+ switch ver {
+ case 1005, 1007:
+ break // no longer supported
+ case 1008, 1009:
+ return nil
+ case 1010, 1011:
+ return nil
+ }
+ return fmt.Errorf("%d unsupported version", ver)
+}
+
+func (p *Parsed) scanFile() error {
+ r := p.r
+ // fill in the following values for sure
+ strings := make(map[uint64]string)
+ p.Strings = strings // ok to save maps immediately
+ timerGoIDs := make(map[uint64]bool)
+ p.timerGoids = timerGoIDs
+ stacks := make(map[uint32][]*Frame)
+ framer := make(map[Frame]*Frame) // uniqify *Frame
+ p.Stacks = stacks
+ footerLoc := 0
+
+ var buf [1]byte
+ off := 16 // skip the header
+ n, err := r.Seek(int64(off), 0)
+ if err != nil || n != int64(off) {
+ return fmt.Errorf("Seek to %d got %d, err=%v", off, n, err)
+ }
+ var batchts int64 // from preceding batch
+ var lastEv byte
+ for {
+ off0 := off
+ n, err := r.Read(buf[:1])
+ if err == io.EOF {
+ break
+ } else if err != nil || n != 1 {
+ return fmt.Errorf("read failed at 0x%x, n=%d, %v",
+ off, n, err)
+ }
+ off += n
+ typ := buf[0] << 2 >> 2
+ if typ == EvNone || typ >= EvCount ||
+ EventDescriptions[typ].MinVersion > p.Version {
+ err = fmt.Errorf("unknown event type %v at offset 0x%x, pass 1", typ, off0)
+ return err
+ }
+ // extract and save the strings
+ if typ == EvString {
+ // String dictionary entry [ID, length, string].
+ var id uint64
+ id, off, err = readVal(r, off)
+ if err != nil {
+ return err
+ }
+ if id == 0 {
+ err = fmt.Errorf("string at offset %d has invalid id 0", off)
+ return err
+ }
+ if strings[id] != "" {
+ err = fmt.Errorf("string at offset %d has duplicate id %v", off, id)
+ return err
+ }
+ var ln uint64
+ ln, off, err = readVal(r, off)
+ if err != nil {
+ return err
+ }
+ if ln == 0 {
+ err = fmt.Errorf("string at offset %d has invalid length 0", off)
+ return err
+ }
+ if ln > 1e6 {
+ err = fmt.Errorf("string at offset %d has too large length %v", off, ln)
+ return err
+ }
+ buf := make([]byte, ln)
+ var n int
+ n, err = io.ReadFull(r, buf)
+ if err != nil {
+ err = fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, ln, err)
+ return err
+ }
+ off += n
+ strings[id] = string(buf)
+ lastEv = EvString
+ continue
+ }
+ p.Count++
+ if typ == EvFrequency {
+ // found footer, remember location, save value
+ footerLoc = off0
+ }
+ var args []uint64
+ off, args, err = p.argsAt(off0, typ)
+ if err != nil {
+ err = fmt.Errorf("argsAt error %v; off=%d off0=%d %s",
+ err, off, off0, evname(typ))
+ return err
+ }
+ r.Seek(int64(off), 0)
+ if typ == EvUserLog {
+ _, off, err = readStr(r, off)
+ if err != nil {
+ return err
+ }
+ }
+ if len(args) == 0 { // can't happen in well-formed trace file
+ return fmt.Errorf("len(args)==0 off=0x%x typ=%s", off, evname(typ))
+ }
+ switch typ {
+ case EvBatch:
+ if footerLoc == 0 {
+ // EvBatch in footer is just to have a header for stacks
+ locp := int64(args[0])
+ p.batches = append(p.batches,
+ batch{Off: off0, P: locp, Cycles: int64(args[1])})
+ // at this point we know when the previous batch ended
+ batchts = int64(args[1])
+ if batchts > p.maxticks {
+ p.maxticks = batchts
+ }
+ }
+ case EvFrequency:
+ p.TicksPerSec = int64(args[0])
+ case EvTimerGoroutine:
+ timerGoIDs[args[0]] = true
+ case EvStack:
+ if len(args) < 2 {
+ return fmt.Errorf("EvStack has too few args %d at 0x%x",
+ len(args), off0)
+ }
+ size := args[1]
+ if size > 1000 {
+ return fmt.Errorf("EvStack has %d frames at 0x%x",
+ size, off0)
+ }
+ want := 2 + 4*size
+ if uint64(len(args)) != want {
+ return fmt.Errorf("EvStack wants %d args, got %d, at 0x%x",
+ len(args), want, off0)
+ }
+ id := args[0]
+ if id != 0 && size > 0 {
+ stk := make([]*Frame, size)
+ for i := 0; i < int(size); i++ {
+ pc := args[2+i*4+0]
+ fn := args[2+i*4+1]
+ file := args[2+i*4+2]
+ line := args[2+i*4+3]
+ stk[i] = &Frame{PC: pc, Fn: strings[fn], File: strings[file], Line: int(line)}
+ if _, ok := framer[*stk[i]]; !ok {
+ framer[*stk[i]] = stk[i]
+ }
+ stk[i] = framer[*stk[i]]
+ }
+ stacks[uint32(id)] = stk
+ }
+ default:
+ if lastEv == EvBatch {
+ // p.minticks is set by the first real event, not the first EvBatch
+ x := batchts + int64(args[0])
+ if x < p.minticks {
+ p.minticks = x
+ }
+ }
+ batchts += int64(args[0])
+ if batchts > p.maxticks {
+ p.maxticks = batchts
+ }
+ }
+ lastEv = typ
+ }
+ if footerLoc <= 0 {
+ return fmt.Errorf("malformed trace file, no EvFrequency")
+ }
+ return nil
+}
diff --git a/src/internal/traceparser/filebuf/filebuf.go b/src/internal/traceparser/filebuf/filebuf.go
new file mode 100755
index 0000000000..32d5a92c5b
--- /dev/null
+++ b/src/internal/traceparser/filebuf/filebuf.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filebuf implements io.SeekReader for os files.
+// This is useful only for very large files with lots of
+// seeking. (otherwise use ioutil.ReadFile or bufio)
+package filebuf
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+// Buf is the implemented interface
+type Buf interface {
+ io.ReadCloser
+ io.Seeker
+ Size() int64
+ Stats() Stat
+}
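+
+// A minimal sketch of use ("trace.out" is a hypothetical file name;
+// error handling elided):
+//
+//	b, err := New("trace.out")
+//	defer b.Close()
+//	var hdr [16]byte
+//	b.Read(hdr[:])
+//	b.Seek(0, io.SeekStart)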
+
+// Buflen is the size of the internal buffer.
+// The code is designed to never need to reread unnecessarily
+const Buflen = 1 << 20
+
+// fbuf is a buffered file with seeking.
+// fixed is an internal buffer. buf is the slice fixed[:fixedLen]. bufloc is the file
+// location of the beginning of fixed (and buf). The seek pointer is at bufloc+bufpos,
+// so the file's contents there start with buf[bufpos:]
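+//
+// Schematically:
+//
+//	file: ......|========= fixed[:fixedlen] =========|......
+//	            ^bufloc          ^bufloc+bufpos (seek pointer)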
+type fbuf struct {
+ Name string
+ fd *os.File
+ size int64 // file size
+ bufloc int64 // file loc of beginning of fixed
+ bufpos int32 // seekptr is at bufloc+bufpos. bufpos <= Buflen, fixedlen
+ fixed [Buflen]byte // backing store for buf
+ fixedlen int // how much of fixed is valid file contents
+ buf []byte // buf is fixed[0:fixedlen]
+ // statistics
+ seeks int // number of calls to fd.Seek
+ reads int // number of calls to fd.Read
+ bytes int64 // number of bytes read by fd.Read
+}
+
+// Stat returns the number of underlying seeks and reads, and bytes read
+type Stat struct {
+ Seeks int
+ Reads int
+ Bytes int64
+}
+
+// Stats returns the stats so far
+func (fb *fbuf) Stats() Stat {
+ return Stat{fb.seeks, fb.reads, fb.bytes}
+}
+
+// Size returns the file size
+func (fb *fbuf) Size() int64 {
+ return fb.size
+}
+
+// New returns an initialized *fbuf or an error
+func New(fname string) (Buf, error) {
+ fd, err := os.Open(fname)
+ if err != nil {
+ return nil, err
+ }
+ fi, err := fd.Stat()
+ if err != nil || fi.Mode().IsDir() {
+ return nil, fmt.Errorf("not readable: %s", fname)
+ }
+ return &fbuf{Name: fname, fd: fd, size: fi.Size()}, nil
+}
+
+// Read implements io.Reader. It may return a positive
+// number of bytes read with io.EOF
+func (fb *fbuf) Read(p []byte) (int, error) {
+ // If there are enough valid bytes remaining in buf, just use them
+ if len(fb.buf[fb.bufpos:]) >= len(p) {
+ copy(p, fb.buf[fb.bufpos:])
+ fb.bufpos += int32(len(p))
+ return len(p), nil
+ }
+ done := 0 // done counts how many bytes have been transferred
+ // If there are any valid bytes left in buf, use them first
+ if len(fb.buf[fb.bufpos:]) > 0 {
+ m := copy(p, fb.buf[fb.bufpos:])
+ done = m
+ fb.bufpos += int32(done) // at end of the valid bytes in buf
+ }
+ // used up buffered data. logical seek pointer is at bufloc+bufpos.
+ // loop until p has been filled up or EOF
+ for done < len(p) {
+ loc, err := fb.fd.Seek(0, io.SeekCurrent) // make sure of the os's file location
+ if loc != fb.bufloc+int64(fb.bufpos) {
+ panic(fmt.Sprintf("%v loc=%d bufloc=%d bufpos=%d", err, loc,
+ fb.bufloc, fb.bufpos))
+ }
+ fb.seeks++ // did a file system seek
+ if loc >= fb.size {
+ // at EOF
+ fb.bufpos = int32(len(fb.buf))
+ fb.bufloc = loc - int64(fb.fixedlen)
+ return done, io.EOF
+ }
+ n, err := fb.fd.Read(fb.fixed[:])
+ if n != 0 {
+ fb.fixedlen = n
+ }
+ fb.reads++ // did a file system read
+ m := copy(p[done:], fb.fixed[:n])
+ done += m
+ if err != nil {
+ if err == io.EOF {
+ fb.bufpos = int32(len(fb.buf))
+ fb.bufloc = loc - int64(fb.fixedlen)
+ return done, io.EOF
+ }
+ return 0, err
+ }
+ fb.bytes += int64(n)
+ fb.bufpos = int32(m) // used m bytes of the buffer
+ fb.bufloc = loc
+ fb.buf = fb.fixed[:n]
+ }
+ return len(p), nil
+}
+
+// Seek implements io.Seeker. For seeks past either end it returns the
+// current position and io.EOF.
+func (fb *fbuf) Seek(offset int64, whence int) (int64, error) {
+ seekpos := offset
+ switch whence {
+ case io.SeekCurrent:
+ seekpos += fb.bufloc + int64(fb.bufpos)
+ case io.SeekEnd:
+ seekpos += fb.size
+ }
+ if seekpos < 0 || seekpos > fb.size {
+ return fb.bufloc + int64(fb.bufpos), io.EOF
+ }
+ // if seekpos is inside fixed, just adjust buf and bufpos
+ if seekpos >= fb.bufloc && seekpos <= int64(fb.fixedlen)+fb.bufloc {
+ fb.bufpos = int32(seekpos - fb.bufloc)
+ return seekpos, nil
+ }
+ // need to refresh the internal buffer. Seek does no reading, mark buf
+ // as empty, set bufpos and bufloc.
+ fb.buf, fb.bufpos, fb.bufloc = nil, 0, seekpos
+ n, err := fb.fd.Seek(seekpos, io.SeekStart)
+ fb.seeks++
+ if n != seekpos || err != nil {
+ return -1, fmt.Errorf("seek failed (%d!= %d) %v", n, seekpos, err)
+ }
+ return seekpos, nil
+}
+
+// Close closes the underlying file
+func (fb *fbuf) Close() error {
+ if fb.fd != nil {
+ return fb.fd.Close()
+ }
+ return nil
+}
diff --git a/src/internal/traceparser/filebuf/filebuf_test.go b/src/internal/traceparser/filebuf/filebuf_test.go
new file mode 100755
index 0000000000..7a735715ed
--- /dev/null
+++ b/src/internal/traceparser/filebuf/filebuf_test.go
@@ -0,0 +1,204 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filebuf
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "testing"
+)
+
+var (
+ inited bool
+ small, large string // files
+ dir string // in this dir
+ contents []byte // contents of the large file
+)
+
+func TestMain(m *testing.M) {
+ create()
+ n := m.Run()
+
+ os.RemoveAll(dir)
+ os.Exit(n)
+}
+
+func create() {
+ if inited {
+ return
+ }
+ log.SetFlags(log.Lshortfile)
+ d, erra := ioutil.TempDir("", "filebuf")
+ s, errb := ioutil.TempFile(d, "small")
+ l, errc := ioutil.TempFile(d, "large")
+ if erra != nil || errb != nil || errc != nil {
+ log.Fatal(erra, errb, errc)
+ }
+ dir, small, large = d, s.Name(), l.Name()
+ buf := make([]byte, 2*Buflen+3)
+ for i := 0; i < len(buf); i++ {
+ buf[i] = byte(i)
+ }
+ err := ioutil.WriteFile(small, buf[:7], 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = ioutil.WriteFile(large, buf, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ contents = buf
+ inited = true
+}
+
+func get(n int) io.Reader {
+ if n <= len(contents) {
+ return bytes.NewReader(contents[:n])
+ }
+ return bytes.NewReader(contents)
+}
+
+func TestSmall(t *testing.T) {
+ var f Buf
+ var err error
+ f, err = New(small)
+ small := func(t *testing.T) {
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := make([]byte, 23)
+ n, err := f.Read(buf)
+ if n != 7 || err != io.EOF {
+ t.Errorf("got %d, expected 7, %v", n, err)
+ }
+ m, err := f.Seek(0, io.SeekCurrent)
+ if m != 7 || err != nil {
+ t.Errorf("got %d, expected 7, %v", m, err)
+ }
+ m, err = f.Seek(1, io.SeekStart)
+ if m != 1 || err != nil {
+ t.Errorf("got %d expected 1, %v", m, err)
+ }
+ n, err = f.Read(buf)
+ if n != 6 || err != io.EOF {
+ t.Errorf("got %d, expected 6, %v", n, err)
+ }
+ for i := 0; i < 6; i++ {
+ if buf[i] != byte(i+1) {
+ t.Fatalf("byte %d is %d, not %d, %v", i, buf[i], i+1, buf)
+ }
+ }
+ }
+ t.Run("New", small)
+ f, err = FromReader(get(7))
+ t.Run("Rdr", small)
+}
+
+func TestLarge(t *testing.T) {
+ var f Buf
+ var err error
+ big := func(t *testing.T) {
+ if err != nil {
+ t.Fatal(err)
+ }
+ x := Buflen - 7
+ n, err := f.Seek(int64(x), io.SeekStart)
+ if n != Buflen-7 || err != nil {
+ t.Fatalf("expected %d, got %d, %v", x, n, err)
+ }
+ buf := make([]byte, 23)
+ m, err := f.Read(buf)
+ if m != len(buf) || err != nil {
+ t.Fatalf("expected %d, got %d, %v", len(buf), m, err)
+ }
+ for i := 0; i < 23; i++ {
+ if buf[i] != byte(x+i) {
+ t.Fatalf("byte %d, got %d, wanted %d", i, buf[i],
+ byte(x+i))
+ }
+ }
+ m, err = f.Read(buf)
+ if m != len(buf) || err != nil {
+ t.Fatalf("got %d, expected %d, %v", m, len(buf), err)
+ }
+ x += len(buf)
+ for i := 0; i < 23; i++ {
+ if buf[i] != byte(x+i) {
+ t.Fatalf("byte %d, got %d, wanted %d", i, buf[i],
+ byte(x+i))
+ }
+ }
+ }
+ f, err = New(large)
+ t.Run("New", big)
+ f, err = FromReader(get(1 << 30))
+ t.Run("Rdr", big)
+}
+
+func TestMore(t *testing.T) {
+ f, err := New(large)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var a, b [4]byte
+ f.Seek(16, 0)
+ f.Read(a[:])
+ f.Seek(16, 0)
+ f.Read(b[:])
+ if a != b {
+ t.Errorf("oops %v %v", a, b)
+ }
+}
+
+func TestSeek(t *testing.T) {
+ f, err := New(small)
+ if err != nil {
+ log.Fatal(err)
+ }
+ n, err := f.Seek(f.Size(), 0)
+ if n != f.Size() || err != nil {
+ t.Errorf("seek got %d, expected %d, %v", n, f.Size(), err)
+ }
+ n, err = f.Seek(1, io.SeekCurrent)
+ if n != f.Size() || err != io.EOF {
+ t.Errorf("n=%d, expected 0. %v", n, err)
+ }
+ n, err = f.Seek(f.Size(), 0)
+ if n != f.Size() || err != nil {
+ t.Errorf("seek got %d, expected %d, %v", n, f.Size(), err)
+ }
+}
+
+func TestReread(t *testing.T) {
+ f, err := New(small)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var buf [1]byte
+ f.Seek(0, 0)
+ for i := 0; i < int(f.Size()); i++ {
+ n, err := f.Read(buf[:])
+ if n != 1 || err != nil {
+ t.Fatalf("n=%d, err=%v", n, err)
+ }
+ }
+ stats := f.Stats()
+ if stats.Bytes != f.Size() || stats.Reads != 1 || stats.Seeks != 1 {
+ t.Errorf("%v %d %d", stats, f.(*fbuf).bufloc, f.(*fbuf).bufpos)
+ }
+ n, err := f.Read(buf[:])
+ if n != 0 || err != io.EOF {
+ t.Fatalf("expected 0 and io.EOF, got %d %v", n, err)
+ }
+ f.Seek(0, 0)
+ xstats := f.Stats()
+ if xstats.Bytes != f.Size() || xstats.Reads != 1 || xstats.Seeks != 2 {
+ t.Errorf("%v %v %d %d", stats, xstats, f.(*fbuf).bufloc, f.(*fbuf).bufpos)
+ }
+ f.Close()
+}
diff --git a/src/internal/traceparser/filebuf/fromreader.go b/src/internal/traceparser/filebuf/fromreader.go
new file mode 100644
index 0000000000..736cbf5e42
--- /dev/null
+++ b/src/internal/traceparser/filebuf/fromreader.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filebuf
+
+import (
+ "bytes"
+ "io"
+)
+
+// implement a Buf version from an io.Reader
+
+type rbuf struct {
+ buf []byte // contents
+ pos int64
+ seeks, reads int // number of calls. 0 seems right.
+}
+
+func (r *rbuf) Stats() Stat {
+ return Stat{r.seeks, r.reads, int64(len(r.buf))}
+}
+
+func (r *rbuf) Size() int64 {
+ return int64(len(r.buf))
+}
+
+// FromReader creates a Buf by copying the contents of an io.Reader
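+// A sketch of use when the trace is already in memory:
+//
+//	b, err := FromReader(bytes.NewReader(data))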
+func FromReader(rd io.Reader) (Buf, error) {
+ r := &rbuf{}
+ x := bytes.NewBuffer(r.buf)
+ _, err := io.Copy(x, rd)
+ r.buf = x.Bytes()
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+func (r *rbuf) Close() error {
+ return nil
+}
+
+func (r *rbuf) Read(p []byte) (int, error) {
+ n := copy(p, r.buf[r.pos:])
+ r.pos += int64(n)
+ if n == 0 || n < len(p) {
+ return n, io.EOF
+ }
+ return n, nil
+}
+
+func (r *rbuf) Seek(offset int64, whence int) (int64, error) {
+ seekpos := offset
+ switch whence {
+ case io.SeekCurrent:
+ seekpos += int64(r.pos)
+ case io.SeekEnd:
+ seekpos += int64(len(r.buf))
+ }
+ if seekpos < 0 || seekpos > int64(len(r.buf)) {
+ if seekpos < 0 {
+ r.pos = 0
+ return 0, nil
+ }
+ r.pos = int64(len(r.buf))
+ return r.pos, nil
+ }
+ r.pos = seekpos
+ return seekpos, nil
+}
diff --git a/src/internal/traceparser/fuzz.go b/src/internal/traceparser/fuzz.go
new file mode 100644
index 0000000000..666ee945fb
--- /dev/null
+++ b/src/internal/traceparser/fuzz.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gofuzz
+
+package traceparser
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+)
+
+// At first we ran the old parser and returned 0 if it failed, on the theory
+// that we don't have to do better. But that leads to very few crashes to
+// look at. Maybe it is better just to ensure that the new parser doesn't
+// misbehave, and that when it reports no error the old parser gets the same
+// results (up to whatever). Perhaps even better would be to seed the corpus
+// with examples from which the 16-byte header has been stripped, and add it
+// back in Fuzz, so the fuzzer doesn't spend a lot of time making changes we
+// reject in the header. (This may not be necessary.)
+
+func Fuzz(data []byte) int {
+ if len(data) < 16 {
+ return 0
+ }
+ switch x := string(data[:16]); x {
+ default:
+ return 0
+ case "go 1.9 trace\000\000\000\000":
+ break
+ case "go 1.10 trace\000\000\000":
+ break
+ case "go 1.11 trace\000\000\000":
+ break
+ }
+ p, errp := ParseBuffer(bytes.NewBuffer(data))
+ if errp != nil {
+ if p != nil {
+ panic(fmt.Sprintf("p not nil on error %s", errp))
+ }
+ }
+ // TODO(pjw): if no errors, compare parses?
+ return 1
+}
+
+func init() {
+ log.SetFlags(log.Lshortfile)
+}
diff --git a/src/internal/traceparser/goroutines.go b/src/internal/traceparser/goroutines.go
new file mode 100644
index 0000000000..5fe22f4f29
--- /dev/null
+++ b/src/internal/traceparser/goroutines.go
@@ -0,0 +1,341 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+import "sort"
+
+// GDesc contains statistics and execution details of a single goroutine.
+type GDesc struct {
+ ID uint64
+ Name string
+ PC uint64
+ CreationTime int64
+ StartTime int64
+ EndTime int64
+
+ // List of regions in the goroutine, sorted based on the start time.
+ Regions []*UserRegionDesc
+
+ // Statistics of execution time during the goroutine execution.
+ GExecutionStat
+
+ *gDesc // private part.
+}
+
+// UserRegionDesc represents a region and goroutine execution stats
+// while the region was active.
+type UserRegionDesc struct {
+ TaskID uint64
+ Name string
+
+ // Region start event. Normally EvUserRegion start event or nil,
+ // but can be EvGoCreate event if the region is a synthetic
+ // region representing task inheritance from the parent goroutine.
+ Start *Event
+
+ // Region end event. Normally EvUserRegion end event or nil,
+ // but can be EvGoStop or EvGoEnd event if the goroutine
+// terminated without explicitly ending the region.
+ End *Event
+
+ GExecutionStat
+}
+
+// GExecutionStat contains statistics about a goroutine's execution
+// during a period of time.
+type GExecutionStat struct {
+ ExecTime int64
+ SchedWaitTime int64
+ IOTime int64
+ BlockTime int64
+ SyscallTime int64
+ GCTime int64
+ SweepTime int64
+ TotalTime int64
+}
+
+// sub returns the stats v-s.
+func (s GExecutionStat) sub(v GExecutionStat) (r GExecutionStat) {
+ r = s
+ r.ExecTime -= v.ExecTime
+ r.SchedWaitTime -= v.SchedWaitTime
+ r.IOTime -= v.IOTime
+ r.BlockTime -= v.BlockTime
+ r.SyscallTime -= v.SyscallTime
+ r.GCTime -= v.GCTime
+ r.SweepTime -= v.SweepTime
+ r.TotalTime -= v.TotalTime
+ return r
+}
+
+// snapshotStat returns the snapshot of the goroutine execution statistics.
+// This is called as we process the ordered trace event stream. lastTs and
+// activeGCStartTime are used to process pending statistics if this is called
+// before any goroutine end event.
+func (g *GDesc) snapshotStat(lastTs, activeGCStartTime int64) (ret GExecutionStat) {
+ ret = g.GExecutionStat
+
+ if g.gDesc == nil {
+ return ret // finalized GDesc. No pending state.
+ }
+
+ if activeGCStartTime != 0 { // terminating while GC is active
+ if g.CreationTime < activeGCStartTime {
+ ret.GCTime += lastTs - activeGCStartTime
+ } else {
+ // The goroutine's lifetime completely overlaps
+ // with a GC.
+ ret.GCTime += lastTs - g.CreationTime
+ }
+ }
+
+ if g.TotalTime == 0 {
+ ret.TotalTime = lastTs - g.CreationTime
+ }
+
+ if g.lastStartTime != 0 {
+ ret.ExecTime += lastTs - g.lastStartTime
+ }
+ if g.blockNetTime != 0 {
+ ret.IOTime += lastTs - g.blockNetTime
+ }
+ if g.blockSyncTime != 0 {
+ ret.BlockTime += lastTs - g.blockSyncTime
+ }
+ if g.blockSyscallTime != 0 {
+ ret.SyscallTime += lastTs - g.blockSyscallTime
+ }
+ if g.blockSchedTime != 0 {
+ ret.SchedWaitTime += lastTs - g.blockSchedTime
+ }
+ if g.blockSweepTime != 0 {
+ ret.SweepTime += lastTs - g.blockSweepTime
+ }
+ return ret
+}
+
+// finalize is called when processing a goroutine end event, or at
+// the end of trace processing, in which case trigger is nil. It
+// finalizes the execution stat and any active regions in the goroutine.
+func (g *GDesc) finalize(lastTs, activeGCStartTime int64, trigger *Event) {
+ if trigger != nil {
+ g.EndTime = trigger.Ts
+ }
+ finalStat := g.snapshotStat(lastTs, activeGCStartTime)
+
+ g.GExecutionStat = finalStat
+ for _, s := range g.activeRegions {
+ s.End = trigger
+ s.GExecutionStat = finalStat.sub(s.GExecutionStat)
+ g.Regions = append(g.Regions, s)
+ }
+ *(g.gDesc) = gDesc{}
+}
+
+// gDesc is a private part of GDesc that is required only during analysis.
+type gDesc struct {
+ lastStartTime int64
+ blockNetTime int64
+ blockSyncTime int64
+ blockSyscallTime int64
+ blockSweepTime int64
+ blockGCTime int64
+ blockSchedTime int64
+
+ activeRegions []*UserRegionDesc // stack of active regions
+}
+
+// GoroutineStats generates statistics for all goroutines in the trace segment.
+func (p *Parsed) GoroutineStats() map[uint64]*GDesc {
+ events := p.Events
+ gs := make(map[uint64]*GDesc)
+ var lastTs int64
+ var gcStartTime int64 // gcStartTime == 0 indicates gc is inactive.
+ for _, ev := range events {
+ lastTs = ev.Ts
+ switch ev.Type {
+ case EvGoCreate:
+ g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gDesc: new(gDesc)}
+ g.blockSchedTime = ev.Ts
+ // When a goroutine is newly created, it inherits the
+ // task of the active region. To ease handling of this
+ // case, we create a fake region description with the
+ // task id.
+ if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gDesc.activeRegions) > 0 {
+ regions := creatorG.gDesc.activeRegions
+ s := regions[len(regions)-1]
+ if s.TaskID != 0 {
+ g.gDesc.activeRegions = []*UserRegionDesc{
+ {TaskID: s.TaskID, Start: ev},
+ }
+ }
+ }
+ gs[g.ID] = g
+ case EvGoStart, EvGoStartLabel:
+ g := gs[ev.G]
+ if g.PC == 0 {
+ stk := p.Stacks[ev.StkID]
+ g.PC = stk[0].PC
+ g.Name = stk[0].Fn
+ }
+ g.lastStartTime = ev.Ts
+ if g.StartTime == 0 {
+ g.StartTime = ev.Ts
+ }
+ if g.blockSchedTime != 0 {
+ g.SchedWaitTime += ev.Ts - g.blockSchedTime
+ g.blockSchedTime = 0
+ }
+ case EvGoEnd, EvGoStop:
+ g := gs[ev.G]
+ g.finalize(ev.Ts, gcStartTime, ev)
+ case EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
+ EvGoBlockSync, EvGoBlockCond:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockSyncTime = ev.Ts
+ case EvGoSched, EvGoPreempt:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockSchedTime = ev.Ts
+ case EvGoSleep, EvGoBlock:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ case EvGoBlockNet:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockNetTime = ev.Ts
+ case EvGoBlockGC:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockGCTime = ev.Ts
+ case EvGoUnblock:
+ g := gs[ev.Args[0]]
+ if g.blockNetTime != 0 {
+ g.IOTime += ev.Ts - g.blockNetTime
+ g.blockNetTime = 0
+ }
+ if g.blockSyncTime != 0 {
+ g.BlockTime += ev.Ts - g.blockSyncTime
+ g.blockSyncTime = 0
+ }
+ g.blockSchedTime = ev.Ts
+ case EvGoSysBlock:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockSyscallTime = ev.Ts
+ case EvGoSysExit:
+ g := gs[ev.G]
+ if g.blockSyscallTime != 0 {
+ g.SyscallTime += ev.Ts - g.blockSyscallTime
+ g.blockSyscallTime = 0
+ }
+ g.blockSchedTime = ev.Ts
+ case EvGCSweepStart:
+ g := gs[ev.G]
+ if g != nil {
+ // Sweep can happen during GC on system goroutine.
+ g.blockSweepTime = ev.Ts
+ }
+ case EvGCSweepDone:
+ g := gs[ev.G]
+ if g != nil && g.blockSweepTime != 0 {
+ g.SweepTime += ev.Ts - g.blockSweepTime
+ g.blockSweepTime = 0
+ }
+ case EvGCStart:
+ gcStartTime = ev.Ts
+ case EvGCDone:
+ for _, g := range gs {
+ if g.EndTime != 0 {
+ continue
+ }
+ if gcStartTime < g.CreationTime {
+ g.GCTime += ev.Ts - g.CreationTime
+ } else {
+ g.GCTime += ev.Ts - gcStartTime
+ }
+ }
+ gcStartTime = 0 // indicates gc is inactive.
+ case EvUserRegion:
+ g := gs[ev.G]
+ switch mode := ev.Args[1]; mode {
+ case 0: // region start
+ g.activeRegions = append(g.activeRegions, &UserRegionDesc{
+ Name: ev.SArgs[0],
+ TaskID: ev.Args[0],
+ Start: ev,
+ GExecutionStat: g.snapshotStat(lastTs, gcStartTime),
+ })
+ case 1: // region end
+ var sd *UserRegionDesc
+ if regionStk := g.activeRegions; len(regionStk) > 0 {
+ n := len(regionStk)
+ sd = regionStk[n-1]
+ regionStk = regionStk[:n-1] // pop
+ g.activeRegions = regionStk
+ } else {
+ sd = &UserRegionDesc{
+ Name: ev.SArgs[0],
+ TaskID: ev.Args[0],
+ }
+ }
+ sd.GExecutionStat = g.snapshotStat(lastTs, gcStartTime).sub(sd.GExecutionStat)
+ sd.End = ev
+ g.Regions = append(g.Regions, sd)
+ }
+ }
+ }
+
+ for _, g := range gs {
+ g.finalize(lastTs, gcStartTime, nil)
+
+ // sort based on region start time
+ sort.Slice(g.Regions, func(i, j int) bool {
+ x := g.Regions[i].Start
+ y := g.Regions[j].Start
+ if x == nil {
+ return true
+ }
+ if y == nil {
+ return false
+ }
+ return x.Ts < y.Ts
+ })
+
+ g.gDesc = nil
+ }
+
+ return gs
+}
+
+// RelatedGoroutines finds a set of goroutines related to goroutine goid.
+func (p *Parsed) RelatedGoroutines(goid uint64) map[uint64]bool {
+ events := p.Events
+ // BFS of depth 2 over "unblock" edges
+ // (what goroutines unblock goroutine goid?).
+ gmap := make(map[uint64]bool)
+ gmap[goid] = true
+ for i := 0; i < 2; i++ {
+ gmap1 := make(map[uint64]bool)
+ for g := range gmap {
+ gmap1[g] = true
+ }
+ for _, ev := range events {
+ if ev.Type == EvGoUnblock && gmap[ev.Args[0]] {
+ gmap1[ev.G] = true
+ }
+ }
+ gmap = gmap1
+ }
+ gmap[0] = true // for GC events
+ return gmap
+}
diff --git a/src/internal/traceparser/parser_test.go b/src/internal/traceparser/parser_test.go
new file mode 100644
index 0000000000..7df56fe1f9
--- /dev/null
+++ b/src/internal/traceparser/parser_test.go
@@ -0,0 +1,107 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+var (
+ // testfiles from the old trace parser
+ otherDir = "../trace/testdata/"
+ want = map[string]bool{"http_1_9_good": true, "http_1_10_good": true, "http_1_11_good": true,
+ "stress_1_9_good": true, "stress_1_10_good": true, "stress_1_11_good": true,
+ "stress_start_stop_1_9_good": true, "stress_start_stop_1_10_good": true,
+ "stress_start_stop_1_11_good": true, "user_task_span_1_11_good": true,
+
+ "http_1_5_good": false, "http_1_7_good": false,
+ "stress_1_5_good": false, "stress_1_5_unordered": false, "stress_1_7_good": false,
+ "stress_start_stop_1_5_good": false, "stress_start_stop_1_7_good": false,
+ }
+)
+
+func TestRemoteFiles(t *testing.T) {
+ files, err := ioutil.ReadDir(otherDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, f := range files {
+ fname := filepath.Join(otherDir, f.Name())
+ p, err := New(fname)
+ if err == nil {
+ err = p.Parse(0, 1<<62, nil)
+ }
+ if err == nil != want[f.Name()] {
+ t.Errorf("%s: got %v expected %v, err=%v",
+ f.Name(), err == nil, want[f.Name()], err)
+ }
+ }
+}
+
+func TestLocalFiles(t *testing.T) {
+
+ files, err := ioutil.ReadDir("./testdata")
+ if err != nil {
+ t.Fatalf("failed to read ./testdata: %v", err)
+ }
+ for _, f := range files {
+ fname := filepath.Join("./testdata", f.Name())
+ p, err := New(fname)
+ if err == nil {
+ err = p.Parse(0, 1<<62, nil)
+ }
+ switch {
+ case strings.Contains(f.Name(), "good"),
+ strings.Contains(f.Name(), "weird"):
+ if err != nil {
+ t.Errorf("unexpected failure %v %s", err, f.Name())
+ }
+ case strings.Contains(f.Name(), "bad"):
+ if err == nil {
+ t.Errorf("bad file did not fail %s", f.Name())
+ }
+ default:
+ t.Errorf("untyped file %v %s", err, f.Name())
+ }
+ }
+}
+
+func TestStats(t *testing.T) {
+ // Need just one good file to see that OSStats works properly.
+ files, err := ioutil.ReadDir("./testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, f := range files {
+ if !strings.HasPrefix(f.Name(), "good") {
+ continue
+ }
+ fname := filepath.Join("./testdata", f.Name())
+ p, err := New(fname)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stat := p.OSStats()
+ if stat.Bytes == 0 || stat.Seeks == 0 || stat.Reads == 0 {
+ t.Errorf("OSStats impossible %v", stat)
+ }
+ fd, err := os.Open(fname)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pb, err := ParseBuffer(fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stat = pb.OSStats()
+ if stat.Seeks != 0 || stat.Reads != 0 {
+ t.Errorf("unexpected positive results %v", stat)
+ }
+ }
+}
diff --git a/src/internal/traceparser/raw.go b/src/internal/traceparser/raw.go
new file mode 100644
index 0000000000..e36a951475
--- /dev/null
+++ b/src/internal/traceparser/raw.go
@@ -0,0 +1,106 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "log"
+)
+
+// convert batches into their raw events. For small intervals (1 or 10 seconds)
+// this takes about 40% of the total Parse time.
+
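+// batchify converts one batch into its raw events. Repeated argument
+// vectors are shared: args[1:] is hashed with FNV-64 into seenArgs, so
+// equal vectors use a single allocation.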
+func (p *Parsed) batchify(b *batch) error {
+ evs := make([]rawEvent, 0)
+ p.seenArgs = make(map[uint64]*[]uint64)
+ hasher := fnv.New64()
+ r := p.r
+ r.Seek(int64(b.Off), 0)
+ var buf [1]byte
+ seenBatch := false // to terminate the loop on the second EvBatch
+
+ for off := b.Off; ; {
+ off0 := off // remember the beginning of the event
+ n, err := r.Read(buf[:])
+ if err != nil {
+ return err
+ }
+ off += n
+ typ := buf[0] << 2 >> 2 // event type is bottom 6 bits
+ if typ == EvFrequency || (typ == EvBatch && seenBatch) {
+ break // found trailer, or next batch
+ }
+ if typ == EvBatch {
+ seenBatch = true
+ }
+ if typ == EvString {
+ // skip over it. error checking was done in file.go
+ _, off, _ = readVal(r, off)
+ var ln uint64
+ ln, off, _ = readVal(r, off)
+ // PJW: why not just seek ahead ln bytes?
+ if false {
+ buf := make([]byte, ln)
+ var n int
+ n, _ = io.ReadFull(r, buf)
+ off += n
+ } else {
+ n, _ := r.Seek(int64(ln), 1)
+ off = int(n)
+ }
+ continue
+ }
+ // build the raw event and collect its arguments
+ ev := rawEvent{typ: typ, off: uint32(off0 - b.Off)}
+ var args []uint64
+ off, args, err = p.argsAt(off0, typ)
+ if err != nil {
+ // PJW: make sure this is useful
+ return fmt.Errorf("parsing %s failed at P=%d off=%d %v", evname(typ),
+ b.P, off0, err)
+ }
+
+ // have we seen the args before?
+ if len(args) > 0 {
+ ev.arg0 = args[0]
+ if len(args) > 1 {
+ hasher.Reset()
+ for i := 1; i < len(args); i++ {
+ var x [8]byte
+ binary.LittleEndian.PutUint64(x[:], args[i])
+ _, err := hasher.Write(x[:])
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ hc := hasher.Sum64()
+ old, ok := p.seenArgs[hc]
+ if !ok {
+ final := make([]uint64, len(args)-1)
+ copy(final, args[1:])
+ p.seenArgs[hc] = &final
+ } else {
+ // is this a collision? PJW: make this precisely right
+ if len(*old) != len(args[1:]) {
+ log.Fatalf("COLLISION old:%v this:%v", *old, args[1:])
+ }
+ }
+ ev.args = p.seenArgs[hc]
+ }
+ }
+ if typ == EvUserLog {
+ // argsAt didn't read the string argument
+ var s string
+ s, off, err = readStr(r, off)
+ ev.sarg = s
+ }
+ evs = append(evs, ev)
+ }
+ b.raws = evs
+ return nil
+}
diff --git a/src/internal/traceparser/robust.go b/src/internal/traceparser/robust.go
new file mode 100644
index 0000000000..91748c592f
--- /dev/null
+++ b/src/internal/traceparser/robust.go
@@ -0,0 +1,585 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+// there are panics for impossible situations. probably an error would be better
+// (if only it were certain these are impossible)
+
+import (
+ "fmt"
+ "log"
+)
+
+// repair an incomplete or possibly damaged interval of Events
+// so that postProcess is happy
+
+// errors returned by checkRunning()
+const (
+ ok = 0
+ badRunning = 1 << iota
+ badP
+ badG0
+)
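+
+// Callers test individual bits of checkRunning's result, e.g.
+//
+//	if ret&badP != 0 {
+//		a.procstart(ev.P) // synthesize the missing EvProcStart
+//	}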
+
+// states of g's and p's
+type gdesc struct {
+ state gStatus
+ ev, evStart, evCreate, evMarkAssist *Event
+}
+
+type pdesc struct {
+ running bool
+ g uint64
+ evSTW, evSweep *Event
+}
+
+func locstr(ev *Event) string {
+ if ev == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s:%x", evname(ev.Type), ev.Ts)
+}
+func (p pdesc) String() string {
+ return fmt.Sprintf("[%v %d %s %s]", p.running, p.g, locstr(p.evSTW), locstr(p.evSweep))
+}
+
+func (g gdesc) String() string {
+ var nm string
+ switch g.state {
+ case gDead:
+ nm = "dead"
+ case gWaiting:
+ nm = "waiting"
+ case gRunnable:
+ nm = "runnable"
+ case gRunning:
+ nm = "running"
+ }
+ f := locstr
+ return fmt.Sprintf("[%s %s,%s,%s,%s]", nm, f(g.ev), f(g.evStart),
+ f(g.evCreate), f(g.evMarkAssist))
+}
+
+func checkRunning(pd pdesc, gd gdesc, ev *Event, okG0 bool) int {
+ ret := ok
+ if gd.state != gRunning {
+ ret |= badRunning
+ }
+ if pd.g != ev.G {
+ ret |= badP
+ }
+ if !okG0 && ev.G == 0 {
+ ret |= badG0
+ }
+ return ret
+}
+
+type aux struct {
+ pref []*Event // prefix
+ evs []*Event // copies and inserted
+ deleted map[byte]int // count by Type
+ inserted map[byte]int // count by Type
+ gs map[uint64]gdesc
+ ps map[int32]pdesc
+ g gdesc
+ px pdesc
+ my *Parsed
+ input []*Event // events in call to robust()
+ last int // last index handled by reorder
+ err error // report inconsistent trace files
+}
+
+func (a *aux) preftime() int64 {
+ ts := a.my.MinWant - 1000
+ if ts < 0 {
+ ts = 0
+ }
+ if len(a.pref) > 0 {
+ ts = a.pref[len(a.pref)-1].Ts + 1
+ }
+ return ts
+}
+func (a *aux) delete(i int, ev *Event) {
+ a.deleted[ev.Type]++
+}
+func (a *aux) prefix(typ byte, g uint64, p int32) {
+ ts := a.preftime()
+ ev := &Event{Type: typ, G: g, P: p, Ts: ts,
+ Args: [3]uint64{0, 0, 1}}
+ a.pref = append(a.pref, ev)
+}
+func (a *aux) procstart(p int32) {
+ if p >= FakeP || a.px.running {
+ return
+ }
+ a.prefix(EvProcStart, 0, p)
+ a.px.running = true
+}
+func (a *aux) makewaiting(i int, g uint64, typ byte) {
+ // GoCreate, g=0 args[0]=g; maybe it exists already?
+ // GoWaiting or GoInSysCall
+ p := int32(a.my.batches[0].P)
+ ev := &Event{Type: EvGoCreate, P: p,
+ Ts: a.preftime(), Args: [3]uint64{g, 0, 2}}
+ a.pref = append(a.pref, ev)
+ a.gs[g] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
+ ev = &Event{Type: typ, G: g, P: p,
+ Ts: a.preftime(), Args: [3]uint64{g, 0, 3}}
+ a.pref = append(a.pref, ev)
+ switch typ {
+ default:
+ panic(fmt.Sprintf("weird typ %s in makewaiting", evname(typ)))
+ case EvGoWaiting, EvGoInSyscall:
+ // ok
+ }
+}
+
+func (a *aux) makerunnable(i int, ev *Event) {
+ // Create, Sched, Preempt, or Unblock
+ switch a.gs[ev.G].state {
+ case gDead:
+ p := int32(a.my.batches[0].P)
+ ev := &Event{Type: EvGoCreate, P: p,
+ Ts: a.preftime(), Args: [3]uint64{ev.G, 0, 4}}
+ a.pref = append(a.pref, ev)
+ a.gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
+ case gRunnable:
+ return
+ case gRunning:
+ //a.prevs(i)
+ a.err = fmt.Errorf("gRunning %d:%s", i, ev)
+ case gWaiting:
+ //a.prevs(i)
+ a.err = fmt.Errorf("no consistent ordering possible %d:%s", i, ev)
+ }
+}
+func (a *aux) makerunning(i int, ev *Event) {
+ // GoStart once it is runnable
+ switch a.g.state {
+ case gDead:
+ a.makerunnable(i, ev)
+ case gRunnable:
+ break
+ case gRunning:
+ return
+ case gWaiting:
+ a.err = fmt.Errorf("gWaiting in makerunnable %d:%s %+v", i, ev, a.g)
+ }
+ // PJW: which P? Probably need a ProcStart once
+ if !a.px.running {
+ a.procstart(ev.P)
+ }
+ p := ev.P
+ if p == TimerP {
+ p = int32(ev.Args[2]) // from events.go:71
+ ev.Args[2] = 0
+ }
+ x := &Event{Type: EvGoStart, G: ev.G, P: p, Args: [3]uint64{ev.G, 0, 5}}
+ x.Ts = ev.Ts - 1
+ a.evs = append(a.evs, x)
+ a.g.state = gRunning
+ a.g.evStart = x
+ a.px.g = x.G
+ a.inserted[EvGoStart]++
+}
+
+func (p *Parsed) robust(events []*Event) ([]*Event, *aux, error) { // *aux for debugging (CheckRobust)
+ a := new(aux)
+ a.gs = make(map[uint64]gdesc)
+ a.ps = make(map[int32]pdesc)
+ var evGC, evSTW *Event
+ tasks := make(map[uint64]*Event) // task id to create
+ activeSpans := make(map[uint64][]*Event)
+ a.gs[0] = gdesc{state: gRunning} // bootstrap
+ a.deleted = make(map[byte]int)
+ a.inserted = make(map[byte]int)
+ a.my = p
+ a.input = events
+
+ for i, ev := range events {
+ if a.err != nil {
+ break
+ }
+ if i < len(events)-1 && ev.Ts == events[i+1].Ts &&
+ i > a.last {
+ // sigh. dragonfly, or similar trouble.
+ // a.last is to avoid overlapping calls
+ // This is a placeholder if needed.
+ //a.reorder(i, events)
+ ev = events[i]
+ }
+ var gok, pok bool
+ a.g, gok = a.gs[ev.G]
+ a.px, pok = a.ps[ev.P]
+ switch ev.Type {
+ case EvProcStart:
+ if a.px.running { // This doesn't happen, but to be safe
+ a.delete(i, ev) // already started
+ continue
+ }
+ a.px.running = true
+ case EvProcStop:
+ if !pok { // Ok to delete, as we've never heard of it
+ a.delete(i, ev)
+ continue
+ }
+ if !a.px.running {
+ a.procstart(ev.P)
+ }
+ if a.px.g != 0 {
+ // p is running a g! Stop the g? Ignore the Stop?
+ // Ignore the Stop. I don't think this happens.
+ // (unless there are equal Ts's or the file is corrupt)
+ a.err = fmt.Errorf("unexpected %d:%s %v", i, ev, a.px)
+ // a.delete(i, ev) // PJW
+ continue
+ }
+ a.px.running = false
+ case EvGCStart:
+ if evGC != nil {
+ // already running; doesn't happen
+ a.delete(i, ev)
+ continue
+ }
+ evGC = ev
+ case EvGCDone:
+ if evGC == nil {
+ // no GCStart to link it to: choice is lying about
+ // the duration or the existence. Do the latter
+ a.delete(i, ev)
+ continue
+ }
+ evGC = nil
+ case EvGCSTWStart:
+ evp := &evSTW
+ if p.Version < 1010 {
+ // Before 1.10, EvGCSTWStart was per-P.
+ evp = &a.px.evSTW
+ }
+ if *evp != nil {
+ // still running; doesn't happen
+ a.delete(i, ev)
+ continue
+ }
+ *evp = ev
+ case EvGCSTWDone:
+ evp := &evSTW
+ if p.Version < 1010 {
+ // Before 1.10, EvGCSTWDone was per-P.
+ evp = &a.px.evSTW
+ }
+ if *evp == nil {
+ // no STWStart to link to: choice is lying about
+ // duration or the existence. Do the latter.
+ a.delete(i, ev)
+ continue
+ }
+ *evp = nil
+ case EvGCMarkAssistStart:
+ if a.g.evMarkAssist != nil {
+ // already running; doesn't happen
+ a.delete(i, ev)
+ continue
+ }
+ a.g.evMarkAssist = ev
+ case EvGCMarkAssistDone:
+ // ok to be in progress
+ a.g.evMarkAssist = nil
+ case EvGCSweepStart:
+ if a.px.evSweep != nil {
+ // older one still running; doesn't happen
+ a.delete(i, ev)
+ continue
+ }
+ a.px.evSweep = ev
+ case EvGCSweepDone:
+ if a.px.evSweep == nil {
+ // no Start to link to: choice is lying about
+ // duration or existence. Do the latter.
+ a.delete(i, ev)
+ continue
+ }
+ a.px.evSweep = nil
+ case EvGoWaiting:
+ if a.g.state != gRunnable {
+ a.makerunnable(i, ev)
+ }
+ a.g.state = gWaiting
+ a.g.ev = ev
+ case EvGoInSyscall: // PJW: same as GoWaiting
+ if a.g.state != gRunnable {
+ a.makerunnable(i, ev)
+ }
+ a.g.state = gWaiting
+ a.g.ev = ev
+ case EvGoCreate:
+ if _, ok := a.gs[ev.Args[0]]; ok {
+ // this g already exists; doesn't happen
+ a.delete(i, ev)
+ continue
+ }
+ ret := checkRunning(a.px, a.g, ev, true)
+ if ret&badRunning != 0 {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ret&badP != 0 {
+ a.procstart(ev.P)
+ }
+ a.gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev,
+ evCreate: ev}
+ case EvGoStart, EvGoStartLabel:
+ if a.g.state != gRunnable {
+ a.makerunnable(i, ev)
+ }
+ if a.px.g != 0 {
+ //a.prevs(i)
+ a.err = fmt.Errorf("p already running %d, %d:%s",
+ a.px.g, i, ev)
+ }
+ a.g.state = gRunning
+ a.g.evStart = ev // PJW: do we need g.evStart?
+ a.px.g = ev.G
+ a.g.evCreate = nil // PJW: do we need g.evCreate?
+ case EvGoEnd, EvGoStop:
+ if !gok {
+ // never heard of it; act as if it never existed
+ a.delete(i, ev)
+ continue
+ }
+ ret := checkRunning(a.px, a.g, ev, false)
+ if ret&badRunning != 0 {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ret&badP != 0 {
+ a.procstart(ev.P)
+ }
+ if ret&badG0 != 0 {
+ // gok should have been false
+ panic(fmt.Sprintf("badG0 %d:%s", i, ev))
+ }
+ a.g.evStart = nil
+ a.g.state = gDead
+ a.px.g = 0
+ case EvGoSched, EvGoPreempt:
+ ret := checkRunning(a.px, a.g, ev, false)
+ if ret&badG0 != 0 {
+ // hopeless, we think. Don't know g
+ a.delete(i, ev)
+ }
+ if ret&badRunning != 0 {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ret&badP != 0 {
+ a.procstart(ev.P)
+ }
+ a.g.state = gRunnable
+ a.g.evStart = nil
+ a.px.g = 0
+ a.g.ev = ev
+ case EvGoUnblock:
+ // g == 0 is ok here (PJW) and elsewhere?
+ if a.g.state != gRunning {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ev.P != TimerP && a.px.g != ev.G {
+ //a.prevs(i)
+ a.err = fmt.Errorf("%v not running %d:%s",
+ a.px, i, ev)
+ continue
+ }
+ g1, _ := a.gs[ev.Args[0]]
+ if g1.state != gWaiting {
+ a.makewaiting(i, ev.Args[0], EvGoWaiting)
+ g1.state = gWaiting
+ }
+ g1.state = gRunnable
+ g1.ev = ev
+ a.gs[ev.Args[0]] = g1
+ // if p == TimerP, clean up from events.go:71
+ ev.Args[2] = 0 // no point in checking p
+ case EvGoSysCall:
+ if ev.G == 0 {
+ // hopeless; don't know how to repair
+ a.delete(i, ev)
+ continue
+ }
+ ret := checkRunning(a.px, a.g, ev, false)
+ if ret&badRunning != 0 {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ret&badP != 0 {
+ a.procstart(ev.P)
+ }
+ a.g.ev = ev
+ case EvGoSysBlock:
+ if ev.G == 0 {
+ // hopeless to repair
+ a.delete(i, ev)
+ continue
+ }
+ ret := checkRunning(a.px, a.g, ev, false)
+ if ret&badRunning != 0 {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ret&badP != 0 {
+ a.procstart(ev.P)
+ }
+ a.g.state = gWaiting
+ a.g.evStart = nil
+ a.px.g = 0
+ case EvGoSysExit:
+ if ev.G == 0 {
+ // don't know how to repair
+ a.delete(i, ev)
+ continue
+ }
+ if a.g.state != gWaiting {
+ a.makewaiting(i, ev.G, EvGoInSyscall)
+ }
+ a.g.state = gRunnable
+ a.g.ev = ev
+ case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
+ EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond,
+ EvGoBlockNet, EvGoBlockGC:
+ if ev.G == 0 { // don't know how to repair
+ a.delete(i, ev)
+ continue
+ }
+ ret := checkRunning(a.px, a.g, ev, false)
+ if ret&badRunning != 0 {
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ }
+ if ret&badP != 0 {
+ a.procstart(ev.P)
+ }
+ a.g.state = gWaiting
+ a.g.ev = ev
+ a.g.evStart = nil
+ a.px.g = 0
+ case EvHeapAlloc, EvGomaxprocs, EvNextGC, EvUserLog:
+ a.makerunning(i, ev)
+ a.g.state = gRunning
+ a.px.g = ev.G
+ default:
+ return nil, nil, fmt.Errorf("robust: unexpected %d:%s", i, ev)
+ case EvUserTaskCreate:
+ taskid := ev.Args[0]
+ if _, ok := tasks[taskid]; ok {
+ // task id conflict, kill this one, believe the earlier one
+ a.delete(i, ev)
+ continue
+ }
+ tasks[ev.Args[0]] = ev
+ case EvUserTaskEnd: // nothing to do
+ case EvUserRegion:
+ mode := ev.Args[1]
+ spans := activeSpans[ev.G]
+ if mode == 0 {
+ activeSpans[ev.G] = append(spans, ev)
+ } else if mode == 1 { // span end
+ n := len(spans)
+ if n > 0 {
+ // check that spans match up; clean up if not
+ s := spans[n-1]
+ if s.Args[0] != ev.Args[0] ||
+ s.SArgs[0] != ev.SArgs[0] {
+ // try to fix it
+ var ok bool
+ spans, ok = fixSpan(spans, ev)
+ if !ok {
+ // unfixed, toss this event
+ a.delete(i, ev)
+ continue
+ }
+ }
+ n = len(spans)
+ if n > 1 {
+ activeSpans[ev.G] = spans[:n-1]
+ } else {
+ delete(activeSpans, ev.G)
+ }
+ }
+ } else {
+ // invalid mode, toss it
+ a.delete(i, ev)
+ continue
+ }
+ }
+ a.gs[ev.G] = a.g
+ a.ps[ev.P] = a.px
+ a.evs = append(a.evs, ev)
+ }
+ ans := a.pref
+ ans = append(ans, a.evs...)
+ p.Preflen = len(a.pref)
+ p.Added = len(a.inserted)
+ p.Ignored = len(a.deleted)
+ return ans, a, a.err
+}
+
+func fixSpan(spans []*Event, ev *Event) ([]*Event, bool) {
+ // probably indicates a corrupt trace file
+ panic("implement")
+}
+
+type same struct {
+ ev *Event
+ g gdesc
+ p pdesc
+}
+
+// This is a placeholder, to organize intervals with equal time stamps
+func (a *aux) reorder(n int, events []*Event) {
+ // bunch of Events with equal time stamps
+ // We care about GoCreate, GoWaiting, GoInSyscall,
+ // GoStart (StartLocal, StartLabel), GoBlock*,
+ // GosSched, GoPreempt, GoUnblock, GoSysExit,
+ // (UnblockLocal, SysExitLocal), GCStart.
+ // maybe ProcStart and ProcStop?
+ repair := []same{}
+ i := n
+ for ; i < len(events) && events[i].Ts == events[n].Ts; i++ {
+ ev := events[i]
+ repair = append(repair, same{ev, a.gs[ev.G],
+ a.ps[ev.P]})
+ }
+ a.last = i - 1
+ log.Println("BEFORE:")
+ for i, r := range repair {
+ log.Printf("x%d:%s %v %v", i+n, r.ev, r.g, r.p)
+ }
+ if true { // PJW
+ return // we're not doing anything yet
+ }
+ // sorting is not going to be enough.
+ log.Println("DID NOTHING!")
+ log.Println("after")
+ for i, r := range repair {
+ log.Printf("y%d:%s %v %v", i+n, r.ev, r.g, r.p)
+ }
+ for i, r := range repair {
+ events[n+i] = r.ev
+ }
+}
+
+// printing for debugging
+func (a *aux) prevs(n int) {
+ for i := 0; i < len(a.pref); i++ {
+ log.Printf("p%3d %s", i, a.pref[i])
+ }
+ start := 0
+ if n > 50 {
+ start = n - 50
+ }
+ for i := start; i <= n+1 && i < len(a.input); i++ {
+ log.Printf("%4d %s", i, a.input[i])
+ }
+}
diff --git a/src/internal/traceparser/testdata/06dfecf6e5dfb78e954e7892120b56bfca50af65-6.bad b/src/internal/traceparser/testdata/06dfecf6e5dfb78e954e7892120b56bfca50af65-6.bad
new file mode 100644
index 0000000000..6e9e23ee8d
Binary files /dev/null and b/src/internal/traceparser/testdata/06dfecf6e5dfb78e954e7892120b56bfca50af65-6.bad differ
diff --git a/src/internal/traceparser/testdata/0e6dd1787a6339366dac733a2f957a05d7aa3ac7-3.bad b/src/internal/traceparser/testdata/0e6dd1787a6339366dac733a2f957a05d7aa3ac7-3.bad
new file mode 100644
index 0000000000..7b723f889f
Binary files /dev/null and b/src/internal/traceparser/testdata/0e6dd1787a6339366dac733a2f957a05d7aa3ac7-3.bad differ
diff --git a/src/internal/traceparser/testdata/16970d24ef6753d71953e20d10638705bdccc3ba-2.weird b/src/internal/traceparser/testdata/16970d24ef6753d71953e20d10638705bdccc3ba-2.weird
new file mode 100644
index 0000000000..6b4b06c6ef
Binary files /dev/null and b/src/internal/traceparser/testdata/16970d24ef6753d71953e20d10638705bdccc3ba-2.weird differ
diff --git a/src/internal/traceparser/testdata/26492441b33e1bb93669f79cf3584755cc3ef7e8-2.weird b/src/internal/traceparser/testdata/26492441b33e1bb93669f79cf3584755cc3ef7e8-2.weird
new file mode 100644
index 0000000000..565a8b2fbc
Binary files /dev/null and b/src/internal/traceparser/testdata/26492441b33e1bb93669f79cf3584755cc3ef7e8-2.weird differ
diff --git a/src/internal/traceparser/testdata/2ccf452e473ded814ea880c602488637fc27e549.good b/src/internal/traceparser/testdata/2ccf452e473ded814ea880c602488637fc27e549.good
new file mode 100644
index 0000000000..9d101f8f73
Binary files /dev/null and b/src/internal/traceparser/testdata/2ccf452e473ded814ea880c602488637fc27e549.good differ
diff --git a/src/internal/traceparser/testdata/34f92cd2ae08f558c494b2ef79e80b574c9f096c-8.weird b/src/internal/traceparser/testdata/34f92cd2ae08f558c494b2ef79e80b574c9f096c-8.weird
new file mode 100644
index 0000000000..d6e9cd6a1d
Binary files /dev/null and b/src/internal/traceparser/testdata/34f92cd2ae08f558c494b2ef79e80b574c9f096c-8.weird differ
diff --git a/src/internal/traceparser/testdata/4557f81f6aae617eeec8dd920997ea27b3dda12b.weird b/src/internal/traceparser/testdata/4557f81f6aae617eeec8dd920997ea27b3dda12b.weird
new file mode 100644
index 0000000000..ae93a0e35c
Binary files /dev/null and b/src/internal/traceparser/testdata/4557f81f6aae617eeec8dd920997ea27b3dda12b.weird differ
diff --git a/src/internal/traceparser/testdata/495712b6e35ad7566869c887aa823fcbf69c0b80-1.weird b/src/internal/traceparser/testdata/495712b6e35ad7566869c887aa823fcbf69c0b80-1.weird
new file mode 100644
index 0000000000..072bc93960
Binary files /dev/null and b/src/internal/traceparser/testdata/495712b6e35ad7566869c887aa823fcbf69c0b80-1.weird differ
diff --git a/src/internal/traceparser/testdata/63cd688ddff425bbbc220fbb7bd4fa11616a8b64-1.bad b/src/internal/traceparser/testdata/63cd688ddff425bbbc220fbb7bd4fa11616a8b64-1.bad
new file mode 100644
index 0000000000..5506aa0e61
Binary files /dev/null and b/src/internal/traceparser/testdata/63cd688ddff425bbbc220fbb7bd4fa11616a8b64-1.bad differ
diff --git a/src/internal/traceparser/testdata/63df44bfc9d27851fb054ce03002e7e25f307e2f-5.weird b/src/internal/traceparser/testdata/63df44bfc9d27851fb054ce03002e7e25f307e2f-5.weird
new file mode 100644
index 0000000000..74ea28cd8e
Binary files /dev/null and b/src/internal/traceparser/testdata/63df44bfc9d27851fb054ce03002e7e25f307e2f-5.weird differ
diff --git a/src/internal/traceparser/testdata/6aa1a69b265c3092972a2a81e77fbcaa87061735-4.bad b/src/internal/traceparser/testdata/6aa1a69b265c3092972a2a81e77fbcaa87061735-4.bad
new file mode 100644
index 0000000000..af0307958e
Binary files /dev/null and b/src/internal/traceparser/testdata/6aa1a69b265c3092972a2a81e77fbcaa87061735-4.bad differ
diff --git a/src/internal/traceparser/testdata/7b82e808a6a3471352a4197d44fedbe3f5fb6f77-1.bad b/src/internal/traceparser/testdata/7b82e808a6a3471352a4197d44fedbe3f5fb6f77-1.bad
new file mode 100644
index 0000000000..5353946cf8
Binary files /dev/null and b/src/internal/traceparser/testdata/7b82e808a6a3471352a4197d44fedbe3f5fb6f77-1.bad differ
diff --git a/src/internal/traceparser/testdata/94347dc6ca9c22daec04c5f2530b16ea60bb0ba2-7.weird b/src/internal/traceparser/testdata/94347dc6ca9c22daec04c5f2530b16ea60bb0ba2-7.weird
new file mode 100644
index 0000000000..3141a88aba
Binary files /dev/null and b/src/internal/traceparser/testdata/94347dc6ca9c22daec04c5f2530b16ea60bb0ba2-7.weird differ
diff --git a/src/internal/traceparser/testdata/9fa93c88557e64b0714b8849aacf713d17ff928e-2.weird b/src/internal/traceparser/testdata/9fa93c88557e64b0714b8849aacf713d17ff928e-2.weird
new file mode 100644
index 0000000000..c33f6cac57
Binary files /dev/null and b/src/internal/traceparser/testdata/9fa93c88557e64b0714b8849aacf713d17ff928e-2.weird differ
diff --git a/src/internal/traceparser/testdata/abf7185aaf1cb69fb5fae50ba8546a7cdefade57-2.weird b/src/internal/traceparser/testdata/abf7185aaf1cb69fb5fae50ba8546a7cdefade57-2.weird
new file mode 100644
index 0000000000..2580a591dd
Binary files /dev/null and b/src/internal/traceparser/testdata/abf7185aaf1cb69fb5fae50ba8546a7cdefade57-2.weird differ
diff --git a/src/internal/traceparser/testdata/d28fcef078c7dc722867d781b1fd7f37ca965372-7.weird b/src/internal/traceparser/testdata/d28fcef078c7dc722867d781b1fd7f37ca965372-7.weird
new file mode 100644
index 0000000000..7d724caddc
Binary files /dev/null and b/src/internal/traceparser/testdata/d28fcef078c7dc722867d781b1fd7f37ca965372-7.weird differ
diff --git a/src/internal/traceparser/testdata/d70f178a3813df03f2aed0d47f6d9bc844b8cb57-4.weird b/src/internal/traceparser/testdata/d70f178a3813df03f2aed0d47f6d9bc844b8cb57-4.weird
new file mode 100644
index 0000000000..9976163b7b
Binary files /dev/null and b/src/internal/traceparser/testdata/d70f178a3813df03f2aed0d47f6d9bc844b8cb57-4.weird differ
diff --git a/src/internal/traceparser/testdata/e68c3126700dda2c2ac3b8743e9f319cb313042a-1.weird b/src/internal/traceparser/testdata/e68c3126700dda2c2ac3b8743e9f319cb313042a-1.weird
new file mode 100644
index 0000000000..49ac545692
Binary files /dev/null and b/src/internal/traceparser/testdata/e68c3126700dda2c2ac3b8743e9f319cb313042a-1.weird differ
diff --git a/src/internal/traceparser/tr.go b/src/internal/traceparser/tr.go
new file mode 100644
index 0000000000..770f280607
--- /dev/null
+++ b/src/internal/traceparser/tr.go
@@ -0,0 +1,498 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package traceparser parses the trace files produced by runtime.StartTrace
+package traceparser
+
+import (
+ "fmt"
+ "internal/traceparser/filebuf"
+ "io"
+ "strings"
+)
+
+// Parsed is the result of parsing a trace file
+type Parsed struct {
+ // Set by New
+ Name string // File's name
+ Size int64 // File's size
+ Count int64 // approximate number of all events
+ MaxTs int64 // range of all events, in nanoseconds
+ Strings map[uint64]string
+ Stacks map[uint32][]*Frame
+ Version int // version of the trace file from header
+ TicksPerSec int64 // from EvFrequency in trailer
+ minticks, maxticks int64 // from Init
+ r filebuf.Buf // implements io.Seek and io.Read
+ batches []batch // location of each Batch and time of start
+ timerGoids map[uint64]bool
+ // the following are per Parse
+ MinWant, MaxWant int64 // range wanted, from the arguments to Parse()
+ Err error // set by internal functions to stop further processing
+ Events []*Event // after Parse, the events from MinWant to MaxWant
+ Preflen int // length of the prefix we added
+ Ignored int // how many events we elided
+ Added int // how many events we added, not including the prefix
+ // internal processing variables
+ seenArgs map[uint64]*[]uint64
+ byproc map[int][]*Event
+ lastGs map[int]uint64
+ lastG uint64
+ lastP int
+ lastTs int64
+}
+
+func (p *Parsed) String() string {
+ ans := []string{}
+ ans = append(ans, fmt.Sprintf("%s Sz:%d Count:%d MaxTs:%d #strs:%d #stks:%d",
+ p.Name, p.Size, p.Count, p.MaxTs, len(p.Strings), len(p.Stacks)))
+ ans = append(ans, fmt.Sprintf("%d clock:%d ticks:(%d,%d) #b:%d",
+ p.Version, p.TicksPerSec, p.minticks, p.maxticks, len(p.batches)))
+ return strings.Join(ans, "\n\t")
+}
+
+// clean up after previous call to Parse
+func (p *Parsed) clean() {
+ // some of these are redundant
+ p.Err = nil
+ p.Events = nil
+ p.Preflen = 0
+ p.Ignored = 0
+ p.Added = 0
+ p.seenArgs = nil // redundant, but safe
+ p.byproc = nil
+ p.lastGs = nil
+ p.lastG = 0
+ p.lastTs = 0
+}
+
+// Frame is a frame in a stack trace
+type Frame struct {
+ PC uint64
+ Fn string
+ File string
+ Line int
+}
+
+// An Event is the parsed form of a single trace event
+type Event struct {
+ Type byte
+ P int32
+ Ts int64
+ G uint64
+ StkID uint32 // key to Parsed.Stacks
+ Args [3]uint64
+ SArgs []string // EvUserLog has 2. Others are 1 or none
+ Link *Event
+}
+
+// Batch remembers the EvBatch events. PJW: keep an index of User events?
+type batch struct {
+ Off int
+ P int64
+ Cycles int64 // as read from EvBatch
+ Nano int64 // start time of batch, set in commonInit()
+ raws []rawEvent // filled in during Parse() for those batches that overlap the desired interval
+}
+
+// rawEvent is a raw event parsed from batches that overlap the time interval
+type rawEvent struct { // about 75 bytes
+ // the choice of what to share (args) and what to make unique per rawEvent
+ // (arg0, sarg) was done by measuring the space impact of various choices.
+ off uint32 // offset in batch (at batch.Off + off in file)
+ typ byte
+ arg0 uint64
+ args *[]uint64 // remainder of the args (frequently nil), shared
+ sarg string
+}
+
+func (r rawEvent) String() string {
+ if r.args != nil && len(*r.args) > 0 {
+ return fmt.Sprintf("[%s %d %v %s]", evname(r.typ), r.arg0, *r.args, r.sarg)
+ }
+ return fmt.Sprintf("[%s, %d, [], %s]", evname(r.typ), r.arg0, r.sarg)
+}
+
+// New scans the trace file, finding the number of events, the earliest and latest
+// timestamps, and the stacks and strings referenced in the file.
+func New(fname string) (*Parsed, error) {
+ fd, err := filebuf.New(fname)
+ if err != nil {
+ return nil, err
+ }
+ return commonInit(fd, fname)
+}
+
+// ParseError may be returned by New() or ParseBuffer() to make some
+// information available when the raw trace file seems to contain
+// negative timestamps. (In P, the Name, Size, Count, Strings, Stacks,
+// and Version fields are valid, and MaxTs or TicksPerSec is negative.)
+type ParseError struct {
+ P *Parsed
+ Err error
+}
+
+func (pe ParseError) Error() string {
+ return pe.Err.Error()
+}
+
+func commonInit(fd filebuf.Buf, fname string) (*Parsed, error) {
+ ans := &Parsed{Name: fname, minticks: 1 << 62} // minticks can only decrease
+ var err error
+ defer func() {
+ if err != nil {
+ fd.Close() // try to clean up after error
+ }
+ }()
+ ans.Size = fd.Size()
+ ans.r = fd
+ // parseRaw here for header, trailer: clock, stacks, strings,
+ if err = ans.parseHeader(); err != nil {
+ return nil, err
+ }
+ if err = ans.scanFile(); err != nil {
+ return nil, err
+ }
+ // done with seenArgs
+ ans.seenArgs = nil
+ // convert the clicks in batches to nanoseconds
+ ans.toNanoseconds()
+ if ans.MaxTs <= 0 || ans.TicksPerSec <= 0 {
+ err := ParseError{
+ P: ans,
+ Err: fmt.Errorf("corrupt trace file: negative time: (max TS=%d, ticks per sec=%d)",
+ ans.MaxTs, ans.TicksPerSec),
+ }
+ return nil, err
+ }
+ return ans, nil
+}
+
+// Parse parses the events in the interval: start <= ts <= start+length.
+// f, if not nil, will be called at various stages of the parse, each identified by the string
+// argument. It could report on elapsed time, or memory usage, or whatever the user wants.
+// The number of times it is called and the contents of the string argument are both
+// changeable details of the implementation. Parse is not safe for concurrent use.
+func (p *Parsed) Parse(start, length int64, f func(string)) error {
+ p.clean()
+ if f == nil {
+ f = func(string) {} // avoid any further testing for nil
+ }
+
+ p.MinWant = start
+ p.MaxWant = start + length
+ // arrange the slice of batches by P
+ byp := map[int64][]*batch{}
+ // PJW: keep track of the order the Ps occur and use that for batchify
+ for i, b := range p.batches {
+ byp[b.P] = append(byp[b.P], &p.batches[i])
+ p.batches[i].raws = nil // reset from last call to Parse
+ }
+ // batchify the ones that overlap the time range
+ for _, v := range byp {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ var bnext *batch
+ if i < len(v)-1 {
+ bnext = v[i+1]
+ }
+ if b.Nano >= p.MaxWant {
+ // starts too late
+ continue
+ } else if b.Nano <= p.MinWant && (bnext != nil && bnext.Nano <= p.MinWant) {
+ // entirely too early
+ continue
+ }
+ err := p.batchify(b)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ f("batchify done")
+ return p.createEvents(f)
+}
+
+// ParseBuffer treats its argument as a trace file, and returns the
+// result of parsing it
+func ParseBuffer(rd io.Reader) (*Parsed, error) {
+ pr, err := filebuf.FromReader(rd)
+ if err != nil {
+ return nil, err
+ }
+ p, err := commonInit(pr, "")
+ if err != nil {
+ return nil, err
+ }
+ // need the version and the initial scan
+ err = p.Parse(0, 1<<62, nil)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// called from commonInit to compute the nanosecond when batches start
+func (p *Parsed) toNanoseconds() {
+ minCycles := p.minticks
+ freq := 1e9 / float64(p.TicksPerSec)
+ // Batches, and more to come. Don't call this twice!
+ for i, ex := range p.batches {
+ p.batches[i].Nano = int64(float64(ex.Cycles-minCycles) * freq)
+ }
+ p.MaxTs = int64(float64(p.maxticks-minCycles) * freq)
+}
+
+// argsAt returns the args of an event in the file and the offset for the next event.
+//
+// For EvString it returns off, nil, nil, and
+// for EvUserLog it ignores the string argument, which must be read by the
+// caller.
+func (p *Parsed) argsAt(off int, check byte) (int, []uint64, error) {
+ off0 := off
+ r := p.r
+ loc, err := r.Seek(int64(off), 0)
+ if err != nil {
+ panic(err)
+ }
+ var buf [1]byte
+ n, err := r.Read(buf[:])
+ if err != nil || n != 1 {
+ return 0, nil, fmt.Errorf("read failed at 0x%x, %d %v, loc=%d",
+ off, n, err, loc)
+ }
+ off += n
+ typ := buf[0] << 2 >> 2
+ narg := buf[0]>>6 + 1
+ inlineArgs := byte(4)
+
+ if typ == EvNone || typ >= EvCount ||
+ EventDescriptions[typ].MinVersion > p.Version {
+ return 0, nil, fmt.Errorf("unk type %v at offset 0x%x", typ, off0)
+ }
+ if typ == EvString { // skip, without error checking
+ _, off, err = readVal(r, off)
+ var ln uint64
+ ln, off, err = readVal(r, off)
+ off += int(ln)
+ return off, nil, nil
+ }
+ args := []uint64{}
+ if narg < inlineArgs {
+ for i := 0; i < int(narg); i++ {
+ var v uint64
+ v, off, err = readVal(r, off)
+ if err != nil {
+ err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
+ return 0, nil, err
+ }
+ args = append(args, v)
+ }
+ } else {
+ // More than inlineArgs args; the first value is the length of the event in bytes.
+ var v uint64
+ v, off, err = readVal(r, off)
+ if err != nil {
+ err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
+ return 0, nil, err
+ }
+ evLen := v
+ off1 := off
+ for evLen > uint64(off-off1) {
+ v, off, err = readVal(r, off)
+ if err != nil {
+ err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
+ return 0, nil, err
+ }
+ args = append(args, v)
+ }
+ if evLen != uint64(off-off1) {
+ err = fmt.Errorf("event has wrong length at offset 0x%x: want %v, got %v", off0, evLen, off-off1)
+ return 0, nil, err
+ }
+ }
+ // This routine does not read the string argument. Callers must treat EvUserLog specially.
+ return off, args, nil
+}
+
+// read a string from r
+func readStr(r io.Reader, off0 int) (s string, off int, err error) {
+ var sz uint64
+ sz, off, err = readVal(r, off0)
+ if err != nil || sz == 0 {
+ return "", off, err
+ }
+ if sz > 1e6 {
+ return "", off, fmt.Errorf("string at offset %d is too large (len=%d)", off, sz)
+ }
+ buf := make([]byte, sz)
+ n, err := io.ReadFull(r, buf)
+ if err != nil || sz != uint64(n) {
+ return "", off + n, fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, sz, err)
+ }
+ return string(buf), off + n, nil
+}
+
+// readVal reads unsigned base-128 value from r.
+func readVal(r io.Reader, off0 int) (v uint64, off int, err error) {
+ off = off0
+ for i := 0; i < 10; i++ {
+ var buf [1]byte
+ var n int
+ n, err = r.Read(buf[:])
+ if err != nil || n != 1 {
+ return 0, 0, fmt.Errorf("failed to read trace at offset %d: read %v, error %v", off0, n, err)
+ }
+ off++
+ v |= uint64(buf[0]&0x7f) << (uint(i) * 7)
+ if buf[0]&0x80 == 0 {
+ return
+ }
+ }
+ return 0, 0, fmt.Errorf("bad value at offset 0x%x", off0)
+}
+
+// OSStats reports on the underlying i/o. If p was created by New,
+// the fields report filesystem activity. If p was created by ParseBuffer,
+// only Size is set.
+func (p *Parsed) OSStats() filebuf.Stat {
+ return p.r.Stats()
+}
+
+func (ev *Event) String() string {
+ var tslink int64
+ if ev.Link != nil {
+ tslink = ev.Link.Ts
+ }
+ return fmt.Sprintf("[g:%d p:%d %s/%d %v %v %x ->%x]",
+ ev.G, ev.P, evname(ev.Type), ev.Type,
+ ev.Args, ev.SArgs, ev.Ts, tslink)
+
+}
+
+func evname(t byte) string {
+ if t >= EvCount { // t is a byte, so it cannot be negative
+ return fmt.Sprintf("typ%d?", t)
+ }
+ return EventDescriptions[t].Name
+}
+
+// Close the underlying file.
+func (p *Parsed) Close() error {
+ return p.r.Close()
+}
+
+// Event types in the trace.
+// Verbatim copy from src/runtime/trace.go with the "trace" prefix removed.
+const (
+ EvNone = 0 // unused
+ EvBatch = 1 // start of per-P batch of events [pid, timestamp]
+ EvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
+ EvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
+ EvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
+ EvProcStart = 5 // start of P [timestamp, thread id]
+ EvProcStop = 6 // stop of P [timestamp]
+ EvGCStart = 7 // GC start [timestamp, seq, stack id]
+ EvGCDone = 8 // GC done [timestamp]
+ EvGCSTWStart = 9 // GC mark termination start [timestamp, kind]
+ EvGCSTWDone = 10 // GC mark termination done [timestamp]
+ EvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
+ EvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
+ EvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
+ EvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
+ EvGoEnd = 15 // goroutine ends [timestamp]
+ EvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
+ EvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
+ EvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
+ EvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
+ EvGoBlock = 20 // goroutine blocks [timestamp, stack]
+ EvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
+ EvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
+ EvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
+ EvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
+ EvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
+ EvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
+ EvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
+ EvGoSysCall = 28 // syscall enter [timestamp, stack]
+ EvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
+ EvGoSysBlock = 30 // syscall blocks [timestamp]
+ EvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
+ EvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
+ EvHeapAlloc = 33 // memstats.heap_live change [timestamp, heap_alloc]
+ EvNextGC = 34 // memstats.next_gc change [timestamp, next_gc]
+ EvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
+ EvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
+ EvString = 37 // string dictionary entry [ID, length, string]
+ EvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
+ EvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
+ EvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
+ EvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
+ EvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
+ EvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
+ EvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
+ EvUserTaskCreate = 45 // trace.NewContext [timestamp, internal task id, internal parent id, stack, name string]
+ EvUserTaskEnd = 46 // end of task [timestamp, internal task id, stack]
+ EvUserRegion = 47 // trace.WithSpan [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
+ EvUserLog = 48 // trace.Log [timestamp, internal id, key string id, stack, value string]
+ EvCount = 49
+)
+
+// EventDescriptions describe the Events
+var EventDescriptions = [EvCount]struct {
+ Name string
+ MinVersion int
+ Stack bool
+ Args []string
+ SArgs []string // string arguments
+}{
+ EvNone: {"None", 1005, false, []string{}, nil},
+ EvBatch: {"Batch", 1005, false, []string{"p", "ticks"}, nil}, // in 1.5 format it was {"p", "seq", "ticks"}
+ EvFrequency: {"Frequency", 1005, false, []string{"freq"}, nil}, // in 1.5 format it was {"freq", "unused"}
+ EvStack: {"Stack", 1005, false, []string{"id", "siz"}, nil},
+ EvGomaxprocs: {"Gomaxprocs", 1005, true, []string{"procs"}, nil},
+ EvProcStart: {"ProcStart", 1005, false, []string{"thread"}, nil},
+ EvProcStop: {"ProcStop", 1005, false, []string{}, nil},
+ EvGCStart: {"GCStart", 1005, true, []string{"seq"}, nil}, // in 1.5 format it was {}
+ EvGCDone: {"GCDone", 1005, false, []string{}, nil},
+ EvGCSTWStart: {"GCSTWStart", 1005, false, []string{"kindid"}, []string{"kind"}}, // <= 1.9, args was {} (implicitly {0})
+ EvGCSTWDone: {"GCSTWDone", 1005, false, []string{}, nil},
+ EvGCSweepStart: {"GCSweepStart", 1005, true, []string{}, nil},
+ EvGCSweepDone: {"GCSweepDone", 1005, false, []string{"swept", "reclaimed"}, nil}, // before 1.9, format was {}
+ EvGoCreate: {"GoCreate", 1005, true, []string{"g", "stack"}, nil},
+ EvGoStart: {"GoStart", 1005, false, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"}
+ EvGoEnd: {"GoEnd", 1005, false, []string{}, nil},
+ EvGoStop: {"GoStop", 1005, true, []string{}, nil},
+ EvGoSched: {"GoSched", 1005, true, []string{}, nil},
+ EvGoPreempt: {"GoPreempt", 1005, true, []string{}, nil},
+ EvGoSleep: {"GoSleep", 1005, true, []string{}, nil},
+ EvGoBlock: {"GoBlock", 1005, true, []string{}, nil},
+ EvGoUnblock: {"GoUnblock", 1005, true, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"}
+ EvGoBlockSend: {"GoBlockSend", 1005, true, []string{}, nil},
+ EvGoBlockRecv: {"GoBlockRecv", 1005, true, []string{}, nil},
+ EvGoBlockSelect: {"GoBlockSelect", 1005, true, []string{}, nil},
+ EvGoBlockSync: {"GoBlockSync", 1005, true, []string{}, nil},
+ EvGoBlockCond: {"GoBlockCond", 1005, true, []string{}, nil},
+ EvGoBlockNet: {"GoBlockNet", 1005, true, []string{}, nil},
+ EvGoSysCall: {"GoSysCall", 1005, true, []string{}, nil},
+ EvGoSysExit: {"GoSysExit", 1005, false, []string{"g", "seq", "ts"}, nil},
+ EvGoSysBlock: {"GoSysBlock", 1005, false, []string{}, nil},
+ EvGoWaiting: {"GoWaiting", 1005, false, []string{"g"}, nil},
+ EvGoInSyscall: {"GoInSyscall", 1005, false, []string{"g"}, nil},
+ EvHeapAlloc: {"HeapAlloc", 1005, false, []string{"mem"}, nil},
+ EvNextGC: {"NextGC", 1005, false, []string{"mem"}, nil},
+ EvTimerGoroutine: {"TimerGoroutine", 1005, false, []string{"g"}, nil}, // in 1.5 format it was {"g", "unused"}
+ EvFutileWakeup: {"FutileWakeup", 1005, false, []string{}, nil},
+ EvString: {"String", 1007, false, []string{}, nil},
+ EvGoStartLocal: {"GoStartLocal", 1007, false, []string{"g"}, nil},
+ EvGoUnblockLocal: {"GoUnblockLocal", 1007, true, []string{"g"}, nil},
+ EvGoSysExitLocal: {"GoSysExitLocal", 1007, false, []string{"g", "ts"}, nil},
+ EvGoStartLabel: {"GoStartLabel", 1008, false, []string{"g", "seq", "labelid"}, []string{"label"}},
+ EvGoBlockGC: {"GoBlockGC", 1008, true, []string{}, nil},
+ EvGCMarkAssistStart: {"GCMarkAssistStart", 1009, true, []string{}, nil},
+ EvGCMarkAssistDone: {"GCMarkAssistDone", 1009, false, []string{}, nil},
+ EvUserTaskCreate: {"UserTaskCreate", 1011, true, []string{"taskid", "pid", "typeid"}, []string{"name"}},
+ EvUserTaskEnd: {"UserTaskEnd", 1011, true, []string{"taskid"}, nil},
+ EvUserRegion: {"UserRegion", 1011, true, []string{"taskid", "mode", "typeid"}, []string{"name"}},
+ EvUserLog: {"UserLog", 1011, true, []string{"id", "keyid"}, []string{"category", "message"}},
+}
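The pieces above compose as follows; a minimal usage sketch, not part of the change itself (internal/traceparser is importable only from inside the Go tree, and the trace file path is hypothetical):

package main

import (
	"fmt"

	"internal/traceparser"
)

func main() {
	p, err := traceparser.New("trace.out") // hypothetical trace file
	if err != nil {
		panic(err)
	}
	defer p.Close()
	// Parse the first millisecond of events; the callback just reports
	// parse phases, per the Parse doc comment above.
	err = p.Parse(0, 1e6, func(phase string) { fmt.Println("phase:", phase) })
	if err != nil {
		panic(err)
	}
	for _, ev := range p.Events {
		fmt.Println(ev) // Event.String, defined above
	}
}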
diff --git a/src/internal/traceparser/writer.go b/src/internal/traceparser/writer.go
new file mode 100644
index 0000000000..498bed72f3
--- /dev/null
+++ b/src/internal/traceparser/writer.go
@@ -0,0 +1,52 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceparser
+
+// test routines used by tests in the trace commands
+
+import "bytes"
+
+// Writer is a test trace writer.
+type Writer struct {
+ bytes.Buffer
+}
+
+// NewWriter provides the test Writer
+func NewWriter() *Writer {
+ w := new(Writer)
+ w.Write([]byte("go 1.9 trace\x00\x00\x00\x00"))
+ return w
+}
+
+// Emit writes an event record to the trace.
+// See Event types for valid types and required arguments.
+func (w *Writer) Emit(typ byte, args ...uint64) {
+ nargs := byte(len(args)) - 1
+ if nargs > 3 {
+ nargs = 3
+ }
+ buf := []byte{typ | nargs<<6}
+ if nargs == 3 {
+ buf = append(buf, 0)
+ }
+ for _, a := range args {
+ buf = appendVarint(buf, a)
+ }
+ if nargs == 3 {
+ buf[1] = byte(len(buf) - 2)
+ }
+ n, err := w.Write(buf)
+ if n != len(buf) || err != nil {
+ panic("failed to write")
+ }
+}
+
+func appendVarint(buf []byte, v uint64) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ buf = append(buf, 0x80|byte(v))
+ }
+ buf = append(buf, byte(v))
+ return buf
+}
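For reference, the base-128 varint format that Emit (via appendVarint) and readVal share round-trips like this; a self-contained sketch in which readVarint is a renamed copy of the readVal loop above, adapted to a *bytes.Reader:

package main

import (
	"bytes"
	"fmt"
)

func appendVarint(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	return append(buf, byte(v))
}

func readVarint(r *bytes.Reader) (uint64, error) {
	var v uint64
	for i := 0; i < 10; i++ { // 10 bytes bound a 64-bit value
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		v |= uint64(b&0x7f) << (uint(i) * 7)
		if b&0x80 == 0 {
			return v, nil
		}
	}
	return 0, fmt.Errorf("varint too long")
}

func main() {
	buf := appendVarint(nil, 300) // encodes as 0xac 0x02
	v, _ := readVarint(bytes.NewReader(buf))
	fmt.Println(v) // 300
}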
diff --git a/src/math/big/float.go b/src/math/big/float.go
index 55b93c8915..6b0cb3f1ed 100644
--- a/src/math/big/float.go
+++ b/src/math/big/float.go
@@ -293,7 +293,7 @@ func (z *Float) setExpAndRound(exp int64, sbit uint) {
z.round(sbit)
}
-// SetMantExp sets z to mant × 2**exp and and returns z.
+// SetMantExp sets z to mant × 2**exp and returns z.
// The result z has the same precision and rounding mode
// as mant. SetMantExp is an inverse of MantExp but does
// not require 0.5 <= |mant| < 1.0. Specifically:
diff --git a/src/math/mod.go b/src/math/mod.go
index e1a414e5f9..7efc018a5d 100644
--- a/src/math/mod.go
+++ b/src/math/mod.go
@@ -24,16 +24,12 @@ func mod(x, y float64) float64 {
if y == 0 || IsInf(x, 0) || IsNaN(x) || IsNaN(y) {
return NaN()
}
- if y < 0 {
- y = -y
- }
+ y = Abs(y)
yfr, yexp := Frexp(y)
- sign := false
r := x
if x < 0 {
r = -x
- sign = true
}
for r >= y {
@@ -43,7 +39,7 @@ func mod(x, y float64) float64 {
}
r = r - Ldexp(y, rexp-yexp)
}
- if sign {
+ if x < 0 {
r = -r
}
return r
diff --git a/src/math/pow.go b/src/math/pow.go
index 336193bce1..2219a906b8 100644
--- a/src/math/pow.go
+++ b/src/math/pow.go
@@ -83,13 +83,7 @@ func pow(x, y float64) float64 {
return 1 / Sqrt(x)
}
- absy := y
- flip := false
- if absy < 0 {
- absy = -absy
- flip = true
- }
- yi, yf := Modf(absy)
+ yi, yf := Modf(Abs(y))
if yf != 0 && x < 0 {
return NaN()
}
@@ -147,9 +141,9 @@ func pow(x, y float64) float64 {
}
// ans = a1*2**ae
- // if flip { ans = 1 / ans }
+ // if y < 0 { ans = 1 / ans }
// but in the opposite order
- if flip {
+ if y < 0 {
a1 = 1 / a1
ae = -ae
}
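Both math cleanups above apply the same observation: the sign/flip flag was true exactly when the original operand was negative, so the flag can be dropped and the unmodified operand retested at the point of use. A standalone illustration of the pattern, not the library code itself:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := -7.5
	r := math.Abs(x) // was: r := x; if x < 0 { r = -x; sign = true }
	// ... work on the non-negative r ...
	if x < 0 { // was: if sign {
		r = -r
	}
	fmt.Println(r) // -7.5: the sign is restored without the extra flag
}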
diff --git a/src/mime/type_unix.go b/src/mime/type_unix.go
index 6549c0f5e9..dfc1f88b2a 100644
--- a/src/mime/type_unix.go
+++ b/src/mime/type_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package mime
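This and the following files all add aix to the old-style +build line, where space-separated terms are ORed and comma-separated terms are ANDed (so js,wasm means js AND wasm). A hypothetical file using the same form:

// +build aix darwin linux js,wasm

package mypkg // builds on aix, darwin, linux, or js&&wasm only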
diff --git a/src/net/addrselect.go b/src/net/addrselect.go
index 1ab9fc5326..7c0dfe261c 100644
--- a/src/net/addrselect.go
+++ b/src/net/addrselect.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// Minimal RFC 6724 address selection.
diff --git a/src/net/conf.go b/src/net/conf.go
index 127aba30cb..971b1a399a 100644
--- a/src/net/conf.go
+++ b/src/net/conf.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package net
diff --git a/src/net/dial_test.go b/src/net/dial_test.go
index 00a84d17d6..3a45c0d2ec 100644
--- a/src/net/dial_test.go
+++ b/src/net/dial_test.go
@@ -318,9 +318,9 @@ func TestDialParallel(t *testing.T) {
expectElapsedMin := tt.expectElapsed - 95*time.Millisecond
expectElapsedMax := tt.expectElapsed + 95*time.Millisecond
- if !(elapsed >= expectElapsedMin) {
+ if elapsed < expectElapsedMin {
t.Errorf("#%d: got %v; want >= %v", i, elapsed, expectElapsedMin)
- } else if !(elapsed <= expectElapsedMax) {
+ } else if elapsed > expectElapsedMax {
t.Errorf("#%d: got %v; want <= %v", i, elapsed, expectElapsedMax)
}
@@ -418,10 +418,10 @@ func TestDialerFallbackDelay(t *testing.T) {
}
expectMin := tt.expectElapsed - 1*time.Millisecond
expectMax := tt.expectElapsed + 95*time.Millisecond
- if !(elapsed >= expectMin) {
+ if elapsed < expectMin {
t.Errorf("#%d: got %v; want >= %v", i, elapsed, expectMin)
}
- if !(elapsed <= expectMax) {
+ if elapsed > expectMax {
t.Errorf("#%d: got %v; want <= %v", i, elapsed, expectMax)
}
}
diff --git a/src/net/dial_unix_test.go b/src/net/dial_unix_test.go
index 0adc10d0bd..3cfc9d81b8 100644
--- a/src/net/dial_unix_test.go
+++ b/src/net/dial_unix_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package net
diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go
index 9a0b1d69a8..3b0293025d 100644
--- a/src/net/dnsclient_unix.go
+++ b/src/net/dnsclient_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// DNS client: see RFC 1035.
// Has to be linked into package net for Dial.
diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go
index 9482fc466f..7dccb6b8ec 100644
--- a/src/net/dnsclient_unix_test.go
+++ b/src/net/dnsclient_unix_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package net
diff --git a/src/net/dnsconfig_unix.go b/src/net/dnsconfig_unix.go
index 64c66f96b8..842d408e56 100644
--- a/src/net/dnsconfig_unix.go
+++ b/src/net/dnsconfig_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// Read system DNS config from /etc/resolv.conf
diff --git a/src/net/dnsconfig_unix_test.go b/src/net/dnsconfig_unix_test.go
index 37bdeb04c8..0797559d1a 100644
--- a/src/net/dnsconfig_unix_test.go
+++ b/src/net/dnsconfig_unix_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package net
diff --git a/src/net/error_posix.go b/src/net/error_posix.go
index 0000700809..70efa4c66f 100644
--- a/src/net/error_posix.go
+++ b/src/net/error_posix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows
package net
diff --git a/src/net/error_unix.go b/src/net/error_unix.go
index b5a5829eaa..e615330388 100644
--- a/src/net/error_unix.go
+++ b/src/net/error_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js linux netbsd openbsd solaris
package net
diff --git a/src/net/fd_unix.go b/src/net/fd_unix.go
index 055ecf0336..e7ab9a45fd 100644
--- a/src/net/fd_unix.go
+++ b/src/net/fd_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package net
diff --git a/src/net/file_unix.go b/src/net/file_unix.go
index 676798d693..452a079bfc 100644
--- a/src/net/file_unix.go
+++ b/src/net/file_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package net
diff --git a/src/net/hook_unix.go b/src/net/hook_unix.go
index d672bd01b0..a1568319f3 100644
--- a/src/net/hook_unix.go
+++ b/src/net/hook_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package net
diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
index 1dddaa95a7..f82d820a43 100644
--- a/src/net/http/httputil/reverseproxy.go
+++ b/src/net/http/httputil/reverseproxy.go
@@ -18,10 +18,6 @@ import (
"time"
)
-// onExitFlushLoop is a callback set by tests to detect the state of the
-// flushLoop() goroutine.
-var onExitFlushLoop func()
-
// ReverseProxy is an HTTP Handler that takes an incoming request and
// sends it to another server, proxying the response back to the
// client.
@@ -42,6 +38,12 @@ type ReverseProxy struct {
// to flush to the client while copying the
// response body.
// If zero, no periodic flushing is done.
+ // A negative value means to flush immediately
+ // after each write to the client.
+ // The FlushInterval is ignored when ReverseProxy
+ // recognizes a response as a streaming response;
+ // for such responses, writes are flushed to the client
+ // immediately.
FlushInterval time.Duration
// ErrorLog specifies an optional logger for errors
@@ -271,7 +273,7 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
fl.Flush()
}
}
- err = p.copyResponse(rw, res.Body)
+ err = p.copyResponse(rw, res.Body, p.flushInterval(req, res))
if err != nil {
defer res.Body.Close()
// Since we're streaming the response, if we run into an error all we can do
@@ -332,15 +334,28 @@ func removeConnectionHeaders(h http.Header) {
}
}
-func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) error {
- if p.FlushInterval != 0 {
+// flushInterval returns the p.FlushInterval value, conditionally
+// overriding its value for a specific request/response.
+func (p *ReverseProxy) flushInterval(req *http.Request, res *http.Response) time.Duration {
+ resCT := res.Header.Get("Content-Type")
+
+ // For Server-Sent Events responses, flush immediately.
+ // The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream
+ if resCT == "text/event-stream" {
+ return -1 // negative means immediately
+ }
+
+ // TODO: more specific cases? e.g. res.ContentLength == -1?
+ return p.FlushInterval
+}
+
+func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error {
+ if flushInterval != 0 {
if wf, ok := dst.(writeFlusher); ok {
mlw := &maxLatencyWriter{
dst: wf,
- latency: p.FlushInterval,
- done: make(chan bool),
+ latency: flushInterval,
}
- go mlw.flushLoop()
defer mlw.stop()
dst = mlw
}
@@ -403,34 +418,48 @@ type writeFlusher interface {
type maxLatencyWriter struct {
dst writeFlusher
- latency time.Duration
+ latency time.Duration // non-zero; negative means to flush immediately
- mu sync.Mutex // protects Write + Flush
- done chan bool
+ mu sync.Mutex // protects t, flushPending, and dst.Flush
+ t *time.Timer
+ flushPending bool
}
-func (m *maxLatencyWriter) Write(p []byte) (int, error) {
+func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
m.mu.Lock()
defer m.mu.Unlock()
- return m.dst.Write(p)
+ n, err = m.dst.Write(p)
+ if m.latency < 0 {
+ m.dst.Flush()
+ return
+ }
+ if m.flushPending {
+ return
+ }
+ if m.t == nil {
+ m.t = time.AfterFunc(m.latency, m.delayedFlush)
+ } else {
+ m.t.Reset(m.latency)
+ }
+ m.flushPending = true
+ return
}
-func (m *maxLatencyWriter) flushLoop() {
- t := time.NewTicker(m.latency)
- defer t.Stop()
- for {
- select {
- case <-m.done:
- if onExitFlushLoop != nil {
- onExitFlushLoop()
- }
- return
- case <-t.C:
- m.mu.Lock()
- m.dst.Flush()
- m.mu.Unlock()
- }
+func (m *maxLatencyWriter) delayedFlush() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if !m.flushPending { // if stop was called but AfterFunc already started this goroutine
+ return
+ }
+ m.dst.Flush()
+ m.flushPending = false
+}
+
+func (m *maxLatencyWriter) stop() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.flushPending = false
+ if m.t != nil {
+ m.t.Stop()
}
}
-
-func (m *maxLatencyWriter) stop() { m.done <- true }
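Putting the new knobs together from the caller's side; a minimal sketch in which the backend and listen addresses are hypothetical. Per the doc comment above, a negative FlushInterval flushes after every write, and text/event-stream responses are flushed immediately regardless of the configured interval:

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"time"
)

func main() {
	backend, err := url.Parse("http://127.0.0.1:8080") // hypothetical backend
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	proxy.FlushInterval = 100 * time.Millisecond // or -1 to flush every write
	log.Fatal(http.ListenAndServe(":8081", proxy))
}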
diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go
index 2f75b4e34e..ddae11b168 100644
--- a/src/net/http/httputil/reverseproxy_test.go
+++ b/src/net/http/httputil/reverseproxy_test.go
@@ -297,10 +297,6 @@ func TestReverseProxyFlushInterval(t *testing.T) {
proxyHandler := NewSingleHostReverseProxy(backendURL)
proxyHandler.FlushInterval = time.Microsecond
- done := make(chan bool)
- onExitFlushLoop = func() { done <- true }
- defer func() { onExitFlushLoop = nil }()
-
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
@@ -314,13 +310,6 @@ func TestReverseProxyFlushInterval(t *testing.T) {
if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected {
t.Errorf("got body %q; expected %q", bodyBytes, expected)
}
-
- select {
- case <-done:
- // OK
- case <-time.After(5 * time.Second):
- t.Error("maxLatencyWriter flushLoop() never exited")
- }
}
func TestReverseProxyCancelation(t *testing.T) {
@@ -946,3 +935,48 @@ func TestReverseProxy_PanicBodyError(t *testing.T) {
req, _ := http.NewRequest("GET", "http://foo.tld/", nil)
rproxy.ServeHTTP(httptest.NewRecorder(), req)
}
+
+func TestSelectFlushInterval(t *testing.T) {
+ tests := []struct {
+ name string
+ p *ReverseProxy
+ req *http.Request
+ res *http.Response
+ want time.Duration
+ }{
+ {
+ name: "default",
+ res: &http.Response{},
+ p: &ReverseProxy{FlushInterval: 123},
+ want: 123,
+ },
+ {
+ name: "server-sent events overrides non-zero",
+ res: &http.Response{
+ Header: http.Header{
+ "Content-Type": {"text/event-stream"},
+ },
+ },
+ p: &ReverseProxy{FlushInterval: 123},
+ want: -1,
+ },
+ {
+ name: "server-sent events overrides zero",
+ res: &http.Response{
+ Header: http.Header{
+ "Content-Type": {"text/event-stream"},
+ },
+ },
+ p: &ReverseProxy{FlushInterval: 0},
+ want: -1,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := tt.p.flushInterval(tt.req, tt.res)
+ if got != tt.want {
+ t.Errorf("flushLatency = %v; want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/net/http/request.go b/src/net/http/request.go
index 967de7917f..3669f17f66 100644
--- a/src/net/http/request.go
+++ b/src/net/http/request.go
@@ -545,6 +545,9 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
} else if r.Method == "CONNECT" && r.URL.Path == "" {
// CONNECT requests normally give just the host and port, not a full URL.
ruri = host
+ if r.URL.Opaque != "" {
+ ruri = r.URL.Opaque
+ }
}
// TODO(bradfitz): escape at least newlines in ruri?
diff --git a/src/net/http/requestwrite_test.go b/src/net/http/requestwrite_test.go
index eb65b9f736..246fb4e65d 100644
--- a/src/net/http/requestwrite_test.go
+++ b/src/net/http/requestwrite_test.go
@@ -512,6 +512,38 @@ var reqWriteTests = []reqWriteTest{
"User-Agent: Go-http-client/1.1\r\n" +
"\r\n",
},
+
+ // CONNECT without Opaque
+ 21: {
+ Req: Request{
+ Method: "CONNECT",
+ URL: &url.URL{
+ Scheme: "https", // of proxy.com
+ Host: "proxy.com",
+ },
+ },
+ // What we used to do, locking that behavior in:
+ WantWrite: "CONNECT proxy.com HTTP/1.1\r\n" +
+ "Host: proxy.com\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
+ "\r\n",
+ },
+
+ // CONNECT with Opaque
+ 22: {
+ Req: Request{
+ Method: "CONNECT",
+ URL: &url.URL{
+ Scheme: "https", // of proxy.com
+ Host: "proxy.com",
+ Opaque: "backend:443",
+ },
+ },
+ WantWrite: "CONNECT backend:443 HTTP/1.1\r\n" +
+ "Host: proxy.com\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
+ "\r\n",
+ },
}
func TestRequestWrite(t *testing.T) {
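From the client side, the new Opaque handling lets a CONNECT request name a target distinct from the proxy host, as TestTransportCONNECTBidi later in this change exercises. A sketch under those assumptions (the proxy and target addresses are hypothetical):

package tunnel

import (
	"io"
	"net/http"
)

// dial opens a CONNECT tunnel through the proxy to backend:443.
// Bytes written to the returned writer go into the tunnel; bytes
// read from the returned reader come back out of it.
func dial(client *http.Client) (io.WriteCloser, io.ReadCloser, error) {
	pr, pw := io.Pipe() // request body: the client-to-tunnel stream
	req, err := http.NewRequest("CONNECT", "http://proxy.example:3128/", pr)
	if err != nil {
		return nil, nil, err
	}
	req.URL.Opaque = "backend:443" // written as the request-target line
	res, err := client.Do(req)
	if err != nil {
		pw.Close()
		return nil, nil, err
	}
	return pw, res.Body, nil
}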
diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go
index 2c6ba3231b..f0b43844dd 100644
--- a/src/net/http/transfer.go
+++ b/src/net/http/transfer.go
@@ -184,6 +184,9 @@ func (t *transferWriter) shouldSendChunkedRequestBody() bool {
if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them
return false
}
+ if t.Method == "CONNECT" {
+ return false
+ }
if requestMethodUsuallyLacksBody(t.Method) {
// Only probe the Request.Body for GET/HEAD/DELETE/etc
// requests, because it's only those types of requests
@@ -357,7 +360,11 @@ func (t *transferWriter) writeBody(w io.Writer) error {
err = cw.Close()
}
} else if t.ContentLength == -1 {
- ncopy, err = io.Copy(w, body)
+ dst := w
+ if t.Method == "CONNECT" {
+ dst = bufioFlushWriter{dst}
+ }
+ ncopy, err = io.Copy(dst, body)
} else {
ncopy, err = io.Copy(w, io.LimitReader(body, t.ContentLength))
if err != nil {
@@ -942,7 +949,7 @@ func (b *body) Close() error {
// no trailer and closing the connection next.
// no point in reading to EOF.
case b.doEarlyClose:
- // Read up to maxPostHandlerReadBytes bytes of the body, looking for
+ // Read up to maxPostHandlerReadBytes bytes of the body, looking
// for EOF (and trailers), so we can re-use this connection.
if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
// There was a declared Content-Length, and we have more bytes remaining
@@ -1050,3 +1057,18 @@ func isKnownInMemoryReader(r io.Reader) bool {
}
return false
}
+
+// bufioFlushWriter is an io.Writer wrapper that flushes all writes
+// on its wrapped writer if it's a *bufio.Writer.
+type bufioFlushWriter struct{ w io.Writer }
+
+func (fw bufioFlushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok {
+ ferr := bw.Flush()
+ if ferr != nil && err == nil {
+ err = ferr
+ }
+ }
+ return
+}
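The bufioFlushWriter above only has an effect when the destination really is a *bufio.Writer; a standalone copy of the wrapper showing that effect (the names here are local to the sketch):

package main

import (
	"bufio"
	"io"
	"os"
)

// flushWriter mirrors bufioFlushWriter: flush the wrapped *bufio.Writer
// after every successful write so tunneled bytes are not held back.
type flushWriter struct{ w io.Writer }

func (fw flushWriter) Write(p []byte) (n int, err error) {
	n, err = fw.w.Write(p)
	if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok {
		if ferr := bw.Flush(); ferr != nil && err == nil {
			err = ferr
		}
	}
	return
}

func main() {
	bw := bufio.NewWriter(os.Stdout)
	io.WriteString(flushWriter{bw}, "written and flushed immediately\n")
	// Without the wrapper, the line would sit in bw until bw.Flush().
}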
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index b298ec6d7d..c459092cb8 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -85,15 +85,6 @@ func init() {
// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
// and call ConfigureTransport. See the package docs for more about HTTP/2.
//
-// The Transport will send CONNECT requests to a proxy for its own use
-// when processing HTTPS requests, but Transport should generally not
-// be used to send a CONNECT request. That is, the Request passed to
-// the RoundTrip method should not have a Method of "CONNECT", as Go's
-// HTTP/1.x implementation does not support full-duplex request bodies
-// being written while the response body is streamed. Go's HTTP/2
-// implementation does support full duplex, but many CONNECT proxies speak
-// HTTP/1.x.
-//
// Responses with status codes in the 1xx range are either handled
// automatically (100 expect-continue) or ignored. The one
// exception is HTTP status code 101 (Switching Protocols), which is
diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
index 739fe5f597..211f8cb467 100644
--- a/src/net/http/transport_test.go
+++ b/src/net/http/transport_test.go
@@ -4279,7 +4279,7 @@ func testTransportIdleConnTimeout(t *testing.T, h2 bool) {
}
// Issue 16208: Go 1.7 crashed after Transport.IdleConnTimeout if an
-// HTTP/2 connection was established but but its caller no longer
+// HTTP/2 connection was established but its caller no longer
// wanted it. (Assuming the connection cache was enabled, which it is
// by default)
//
@@ -4887,3 +4887,68 @@ func TestTransportResponseBodyWritableOnProtocolSwitch(t *testing.T) {
t.Errorf("read %q; want %q", got, want)
}
}
+
+func TestTransportCONNECTBidi(t *testing.T) {
+ defer afterTest(t)
+ const target = "backend:443"
+ cst := newClientServerTest(t, h1Mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+ if r.Method != "CONNECT" {
+ t.Errorf("unexpected method %q", r.Method)
+ w.WriteHeader(500)
+ return
+ }
+ if r.RequestURI != target {
+ t.Errorf("unexpected CONNECT target %q", r.RequestURI)
+ w.WriteHeader(500)
+ return
+ }
+ nc, brw, err := w.(Hijacker).Hijack()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer nc.Close()
+ nc.Write([]byte("HTTP/1.1 200 OK\r\n\r\n"))
+ // Switch to a little protocol that capitalizes its input lines:
+ for {
+ line, err := brw.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ t.Error(err)
+ }
+ return
+ }
+ io.WriteString(brw, strings.ToUpper(line))
+ brw.Flush()
+ }
+ }))
+ defer cst.close()
+ pr, pw := io.Pipe()
+ defer pw.Close()
+ req, err := NewRequest("CONNECT", cst.ts.URL, pr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.URL.Opaque = target
+ res, err := cst.c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ t.Fatalf("status code = %d; want 200", res.StatusCode)
+ }
+ br := bufio.NewReader(res.Body)
+ for _, str := range []string{"foo", "bar", "baz"} {
+ fmt.Fprintf(pw, "%s\n", str)
+ got, err := br.ReadString('\n')
+ if err != nil {
+ t.Fatal(err)
+ }
+ got = strings.TrimSpace(got)
+ want := strings.ToUpper(str)
+ if got != want {
+ t.Fatalf("got %q; want %q", got, want)
+ }
+ }
+}
diff --git a/src/net/interface_aix.go b/src/net/interface_aix.go
new file mode 100644
index 0000000000..9a8b5bbdb1
--- /dev/null
+++ b/src/net/interface_aix.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "internal/syscall/unix"
+ "syscall"
+ "unsafe"
+)
+
+type rawSockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [120]byte
+}
+
+type ifreq struct {
+ Name [16]uint8
+ Ifru [16]byte
+}
+
+const _KINFO_RT_IFLIST = (0x1 << 8) | 3 | (1 << 30)
+
+const _RTAX_NETMASK = 2
+const _RTAX_IFA = 5
+const _RTAX_MAX = 8
+
+func getIfList() ([]byte, error) {
+ needed, err := syscall.Getkerninfo(_KINFO_RT_IFLIST, 0, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+ tab := make([]byte, needed)
+ _, err = syscall.Getkerninfo(_KINFO_RT_IFLIST, uintptr(unsafe.Pointer(&tab[0])), uintptr(unsafe.Pointer(&needed)), 0)
+ if err != nil {
+ return nil, err
+ }
+ return tab[:needed], nil
+}
+
+// If the ifindex is zero, interfaceTable returns mappings of all
+// network interfaces. Otherwise it returns a mapping of a specific
+// interface.
+func interfaceTable(ifindex int) ([]Interface, error) {
+ tab, err := getIfList()
+ if err != nil {
+ return nil, err
+ }
+
+ var ift []Interface
+ for len(tab) > 0 {
+ ifm := (*syscall.IfMsgHdr)(unsafe.Pointer(&tab[0]))
+ if ifm.Msglen == 0 {
+ break
+ }
+ if ifm.Type == syscall.RTM_IFINFO {
+ if ifindex == 0 || ifindex == int(ifm.Index) {
+ sdl := (*rawSockaddrDatalink)(unsafe.Pointer(&tab[syscall.SizeofIfMsghdr]))
+
+ ifi := &Interface{Index: int(ifm.Index), Flags: linkFlags(ifm.Flags)}
+ ifi.Name = string(sdl.Data[:sdl.Nlen])
+ ifi.HardwareAddr = sdl.Data[sdl.Nlen : sdl.Nlen+sdl.Alen]
+
+ // Retrieve MTU
+ ifr := &ifreq{}
+ copy(ifr.Name[:], ifi.Name)
+ sock, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
+ if err != nil {
+ return nil, err
+ }
+ err = unix.Ioctl(sock, syscall.SIOCGIFMTU, uintptr(unsafe.Pointer(ifr)))
+ if err != nil {
+ return nil, err
+ }
+ ifi.MTU = int(ifr.Ifru[0])<<24 | int(ifr.Ifru[1])<<16 | int(ifr.Ifru[2])<<8 | int(ifr.Ifru[3])
+
+ ift = append(ift, *ifi)
+ if ifindex == int(ifm.Index) {
+ break
+ }
+ }
+ }
+ tab = tab[ifm.Msglen:]
+ }
+
+ return ift, nil
+}
+
+func linkFlags(rawFlags int32) Flags {
+ var f Flags
+ if rawFlags&syscall.IFF_UP != 0 {
+ f |= FlagUp
+ }
+ if rawFlags&syscall.IFF_BROADCAST != 0 {
+ f |= FlagBroadcast
+ }
+ if rawFlags&syscall.IFF_LOOPBACK != 0 {
+ f |= FlagLoopback
+ }
+ if rawFlags&syscall.IFF_POINTOPOINT != 0 {
+ f |= FlagPointToPoint
+ }
+ if rawFlags&syscall.IFF_MULTICAST != 0 {
+ f |= FlagMulticast
+ }
+ return f
+}
+
+// If the ifi is nil, interfaceAddrTable returns addresses for all
+// network interfaces. Otherwise it returns addresses for a specific
+// interface.
+func interfaceAddrTable(ifi *Interface) ([]Addr, error) {
+ tab, err := getIfList()
+ if err != nil {
+ return nil, err
+ }
+
+ var ifat []Addr
+ for len(tab) > 0 {
+ ifm := (*syscall.IfMsgHdr)(unsafe.Pointer(&tab[0]))
+ if ifm.Msglen == 0 {
+ break
+ }
+ if ifm.Type == syscall.RTM_NEWADDR {
+ if ifi == nil || ifi.Index == int(ifm.Index) {
+ mask := ifm.Addrs
+ off := uint(syscall.SizeofIfMsghdr)
+
+ var iprsa, nmrsa *syscall.RawSockaddr
+ for i := uint(0); i < _RTAX_MAX; i++ {
+ if mask&(1<<i) != 0 {
for len(b.jobs) > 0 {
l := len(b.jobs) - 1
// Pop job off the stack.
@@ -150,7 +163,7 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
}
Skip:
- inst := b.prog.Inst[pc]
+ inst := re.prog.Inst[pc]
switch inst.Op {
default:
@@ -172,23 +185,23 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
pc = inst.Arg
goto CheckAndLoop
} else {
- b.push(pc, pos, true)
+ b.push(re, pc, pos, true)
pc = inst.Out
goto CheckAndLoop
}
case syntax.InstAltMatch:
// One opcode consumes runes; the other leads to match.
- switch b.prog.Inst[inst.Out].Op {
+ switch re.prog.Inst[inst.Out].Op {
case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
// inst.Arg is the match.
- b.push(inst.Arg, pos, false)
+ b.push(re, inst.Arg, pos, false)
pc = inst.Arg
pos = b.end
goto CheckAndLoop
}
// inst.Out is the match - non-greedy
- b.push(inst.Out, b.end, false)
+ b.push(re, inst.Out, b.end, false)
pc = inst.Out
goto CheckAndLoop
@@ -236,7 +249,7 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
} else {
if 0 <= inst.Arg && inst.Arg < uint32(len(b.cap)) {
// Capture pos to register, but save old value.
- b.push(pc, b.cap[inst.Arg], true) // come back when we're done.
+ b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done.
b.cap[inst.Arg] = pos
}
pc = inst.Out
@@ -244,7 +257,8 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
}
case syntax.InstEmptyWidth:
- if syntax.EmptyOp(inst.Arg)&^i.context(pos) != 0 {
+ flag := i.context(pos)
+ if !flag.match(syntax.EmptyOp(inst.Arg)) {
continue
}
pc = inst.Out
@@ -258,8 +272,7 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
// We found a match. If the caller doesn't care
// where the match is, no point going further.
if len(b.cap) == 0 {
- m.matched = true
- return m.matched
+ return true
}
// Record best match so far.
@@ -268,19 +281,18 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
if len(b.cap) > 1 {
b.cap[1] = pos
}
- if !m.matched || (longest && pos > 0 && pos > m.matchcap[1]) {
- copy(m.matchcap, b.cap)
+ if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) {
+ copy(b.matchcap, b.cap)
}
- m.matched = true
// If going for first match, we're done.
if !longest {
- return m.matched
+ return true
}
// If we used the entire text, no longer match is possible.
if pos == b.end {
- return m.matched
+ return true
}
// Otherwise, continue on in hope of a longer match.
@@ -288,65 +300,68 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
}
}
- return m.matched
+ return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0
}
// backtrack runs a backtracking search of prog on the input starting at pos.
-func (m *machine) backtrack(i input, pos int, end int, ncap int) bool {
- if !i.canCheckPrefix() {
- panic("backtrack called for a RuneReader")
- }
-
- startCond := m.re.cond
+func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int {
+ startCond := re.cond
if startCond == ^syntax.EmptyOp(0) { // impossible
- return false
+ return nil
}
if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
// Anchored match, past beginning of text.
- return false
+ return nil
}
- b := m.b
- b.reset(end, ncap)
-
- m.matchcap = m.matchcap[:ncap]
- for i := range m.matchcap {
- m.matchcap[i] = -1
- }
+ b := newBitState()
+ i, end := b.inputs.init(nil, ib, is)
+ b.reset(re.prog, end, ncap)
// Anchored search must start at the beginning of the input
if startCond&syntax.EmptyBeginText != 0 {
if len(b.cap) > 0 {
b.cap[0] = pos
}
- return m.tryBacktrack(b, i, uint32(m.p.Start), pos)
- }
+ if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
+ freeBitState(b)
+ return nil
+ }
+ } else {
- // Unanchored search, starting from each possible text position.
- // Notice that we have to try the empty string at the end of
- // the text, so the loop condition is pos <= end, not pos < end.
- // This looks like it's quadratic in the size of the text,
- // but we are not clearing visited between calls to TrySearch,
- // so no work is duplicated and it ends up still being linear.
- width := -1
- for ; pos <= end && width != 0; pos += width {
- if len(m.re.prefix) > 0 {
- // Match requires literal prefix; fast search for it.
- advance := i.index(m.re, pos)
- if advance < 0 {
- return false
+ // Unanchored search, starting from each possible text position.
+ // Notice that we have to try the empty string at the end of
+ // the text, so the loop condition is pos <= end, not pos < end.
+ // This looks like it's quadratic in the size of the text,
+ // but we are not clearing visited between calls to TrySearch,
+ // so no work is duplicated and it ends up still being linear.
+ width := -1
+ for ; pos <= end && width != 0; pos += width {
+ if len(re.prefix) > 0 {
+ // Match requires literal prefix; fast search for it.
+ advance := i.index(re, pos)
+ if advance < 0 {
+ freeBitState(b)
+ return nil
+ }
+ pos += advance
}
- pos += advance
- }
- if len(b.cap) > 0 {
- b.cap[0] = pos
+ if len(b.cap) > 0 {
+ b.cap[0] = pos
+ }
+ if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
+ // Match must be leftmost; done.
+ goto Match
+ }
+ _, width = i.step(pos)
}
- if m.tryBacktrack(b, i, uint32(m.p.Start), pos) {
- // Match must be leftmost; done.
- return true
- }
- _, width = i.step(pos)
+ freeBitState(b)
+ return nil
}
- return false
+
+Match:
+ dstCap = append(dstCap, b.matchcap...)
+ freeBitState(b)
+ return dstCap
}
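The backtracker now allocates its scratch state through newBitState/freeBitState (defined elsewhere in this change) rather than hanging it off the machine, and the exec.go hunks below recycle the one-pass engine's state through a sync.Pool the same way. A reduced sketch of that pattern, with a hypothetical state type standing in for bitState/onePassMachine:

package main

import (
	"fmt"
	"sync"
)

type state struct{ cap []int } // stand-in for bitState/onePassMachine

var statePool sync.Pool

func newState() *state {
	s, ok := statePool.Get().(*state)
	if !ok {
		s = new(state)
	}
	return s
}

func freeState(s *state) {
	s.cap = s.cap[:0] // keep the allocation, drop the per-call data
	statePool.Put(s)
}

func main() {
	s := newState()
	s.cap = append(s.cap, 0, -1)
	fmt.Println(s.cap) // [0 -1]
	freeState(s)       // s may be handed out again by a later newState
}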
diff --git a/src/regexp/exec.go b/src/regexp/exec.go
index 1c7b02d1cd..efe764e2dc 100644
--- a/src/regexp/exec.go
+++ b/src/regexp/exec.go
@@ -7,6 +7,7 @@ package regexp
import (
"io"
"regexp/syntax"
+ "sync"
)
// A queue is a 'sparse array' holding pending threads of execution.
@@ -35,54 +36,60 @@ type thread struct {
// A machine holds all the state during an NFA simulation for p.
type machine struct {
- re *Regexp // corresponding Regexp
- p *syntax.Prog // compiled program
- op *onePassProg // compiled onepass program, or notOnePass
- maxBitStateLen int // max length of string to search with bitstate
- b *bitState // state for backtracker, allocated lazily
- q0, q1 queue // two queues for runq, nextq
- pool []*thread // pool of available threads
- matched bool // whether a match was found
- matchcap []int // capture information for the match
+ re *Regexp // corresponding Regexp
+ p *syntax.Prog // compiled program
+ q0, q1 queue // two queues for runq, nextq
+ pool []*thread // pool of available threads
+ matched bool // whether a match was found
+ matchcap []int // capture information for the match
+ inputs inputs
+}
+
+type inputs struct {
// cached inputs, to avoid allocation
- inputBytes inputBytes
- inputString inputString
- inputReader inputReader
+ bytes inputBytes
+ string inputString
+ reader inputReader
}
-func (m *machine) newInputBytes(b []byte) input {
- m.inputBytes.str = b
- return &m.inputBytes
+func (i *inputs) newBytes(b []byte) input {
+ i.bytes.str = b
+ return &i.bytes
}
-func (m *machine) newInputString(s string) input {
- m.inputString.str = s
- return &m.inputString
+func (i *inputs) newString(s string) input {
+ i.string.str = s
+ return &i.string
}
-func (m *machine) newInputReader(r io.RuneReader) input {
- m.inputReader.r = r
- m.inputReader.atEOT = false
- m.inputReader.pos = 0
- return &m.inputReader
+func (i *inputs) newReader(r io.RuneReader) input {
+ i.reader.r = r
+ i.reader.atEOT = false
+ i.reader.pos = 0
+ return &i.reader
}
-// progMachine returns a new machine running the prog p.
-func progMachine(p *syntax.Prog, op *onePassProg) *machine {
- m := &machine{p: p, op: op}
- n := len(m.p.Inst)
- m.q0 = queue{make([]uint32, n), make([]entry, 0, n)}
- m.q1 = queue{make([]uint32, n), make([]entry, 0, n)}
- ncap := p.NumCap
- if ncap < 2 {
- ncap = 2
+func (i *inputs) clear() {
+ // We need to clear 1 of these.
+ // Avoid the expense of clearing the others (pointer write barrier).
+ if i.bytes.str != nil {
+ i.bytes.str = nil
+ } else if i.reader.r != nil {
+ i.reader.r = nil
+ } else {
+ i.string.str = ""
}
- if op == notOnePass {
- m.maxBitStateLen = maxBitStateLen(p)
+}
+
+func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) {
+ if r != nil {
+ return i.newReader(r), 0
}
- m.matchcap = make([]int, ncap)
- return m
+ if b != nil {
+ return i.newBytes(b), len(b)
+ }
+ return i.newString(s), len(s)
}
func (m *machine) init(ncap int) {
@@ -107,6 +114,61 @@ func (m *machine) alloc(i *syntax.Inst) *thread {
return t
}
+// A lazyFlag is a lazily-evaluated syntax.EmptyOp,
+// for checking zero-width flags like ^ $ \A \z \B \b.
+// It records the pair of relevant runes and does not
+// determine the implied flags until absolutely necessary
+// (most of the time, that means never).
+type lazyFlag uint64
+
+func newLazyFlag(r1, r2 rune) lazyFlag {
+ return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2)))
+}
+
+func (f lazyFlag) match(op syntax.EmptyOp) bool {
+ if op == 0 {
+ return true
+ }
+ r1 := rune(f >> 32)
+ if op&syntax.EmptyBeginLine != 0 {
+ if r1 != '\n' && r1 >= 0 {
+ return false
+ }
+ op &^= syntax.EmptyBeginLine
+ }
+ if op&syntax.EmptyBeginText != 0 {
+ if r1 >= 0 {
+ return false
+ }
+ op &^= syntax.EmptyBeginText
+ }
+ if op == 0 {
+ return true
+ }
+ r2 := rune(f)
+ if op&syntax.EmptyEndLine != 0 {
+ if r2 != '\n' && r2 >= 0 {
+ return false
+ }
+ op &^= syntax.EmptyEndLine
+ }
+ if op&syntax.EmptyEndText != 0 {
+ if r2 >= 0 {
+ return false
+ }
+ op &^= syntax.EmptyEndText
+ }
+ if op == 0 {
+ return true
+ }
+ if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) {
+ op &^= syntax.EmptyWordBoundary
+ } else {
+ op &^= syntax.EmptyNoWordBoundary
+ }
+ return op == 0
+}
+
// match runs the machine over the input starting at pos.
// It reports whether a match was found.
// If so, m.matchcap holds the submatch information.
@@ -126,9 +188,9 @@ func (m *machine) match(i input, pos int) bool {
if r != endOfText {
r1, width1 = i.step(pos + width)
}
- var flag syntax.EmptyOp
+ var flag lazyFlag
if pos == 0 {
- flag = syntax.EmptyOpContext(-1, r)
+ flag = newLazyFlag(-1, r)
} else {
flag = i.context(pos)
}
@@ -157,10 +219,10 @@ func (m *machine) match(i input, pos int) bool {
if len(m.matchcap) > 0 {
m.matchcap[0] = pos
}
- m.add(runq, uint32(m.p.Start), pos, m.matchcap, flag, nil)
+ m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil)
}
- flag = syntax.EmptyOpContext(r, r1)
- m.step(runq, nextq, pos, pos+width, r, flag)
+ flag = newLazyFlag(r, r1)
+ m.step(runq, nextq, pos, pos+width, r, &flag)
if width == 0 {
break
}
@@ -195,7 +257,7 @@ func (m *machine) clear(q *queue) {
// The step processes the rune c (which may be endOfText),
// which starts at position pos and ends at nextPos.
// nextCond gives the setting for the empty-width flags after c.
-func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond syntax.EmptyOp) {
+func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) {
longest := m.re.longest
for j := 0; j < len(runq.dense); j++ {
d := &runq.dense[j]
@@ -252,7 +314,8 @@ func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond sy
// It also recursively adds an entry for all instructions reachable from pc by following
// empty-width conditions satisfied by cond. pos gives the current position
// in the input.
-func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.EmptyOp, t *thread) *thread {
+func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread {
+Again:
if pc == 0 {
return t
}
@@ -275,13 +338,16 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty
// nothing
case syntax.InstAlt, syntax.InstAltMatch:
t = m.add(q, i.Out, pos, cap, cond, t)
- t = m.add(q, i.Arg, pos, cap, cond, t)
+ pc = i.Arg
+ goto Again
case syntax.InstEmptyWidth:
- if syntax.EmptyOp(i.Arg)&^cond == 0 {
- t = m.add(q, i.Out, pos, cap, cond, t)
+ if cond.match(syntax.EmptyOp(i.Arg)) {
+ pc = i.Out
+ goto Again
}
case syntax.InstNop:
- t = m.add(q, i.Out, pos, cap, cond, t)
+ pc = i.Out
+ goto Again
case syntax.InstCapture:
if int(i.Arg) < len(cap) {
opos := cap[i.Arg]
@@ -289,7 +355,8 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty
m.add(q, i.Out, pos, cap, cond, nil)
cap[i.Arg] = opos
} else {
- t = m.add(q, i.Out, pos, cap, cond, t)
+ pc = i.Out
+ goto Again
}
case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
if t == nil {
@@ -306,85 +373,112 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty
return t
}
-// onepass runs the machine over the input starting at pos.
-// It reports whether a match was found.
-// If so, m.matchcap holds the submatch information.
-// ncap is the number of captures.
-func (m *machine) onepass(i input, pos, ncap int) bool {
- startCond := m.re.cond
- if startCond == ^syntax.EmptyOp(0) { // impossible
- return false
+type onePassMachine struct {
+ inputs inputs
+ matchcap []int
+}
+
+var onePassPool sync.Pool
+
+func newOnePassMachine() *onePassMachine {
+ m, ok := onePassPool.Get().(*onePassMachine)
+ if !ok {
+ m = new(onePassMachine)
}
- m.matched = false
- m.matchcap = m.matchcap[:ncap]
+ return m
+}
+
+func freeOnePassMachine(m *onePassMachine) {
+ m.inputs.clear()
+ onePassPool.Put(m)
+}
+
+// doOnePass implements re.doExecute using the one-pass execution engine.
+func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int {
+ startCond := re.cond
+ if startCond == ^syntax.EmptyOp(0) { // impossible
+ return nil
+ }
+
+ m := newOnePassMachine()
+ if cap(m.matchcap) < ncap {
+ m.matchcap = make([]int, ncap)
+ } else {
+ m.matchcap = m.matchcap[:ncap]
+ }
+
+ matched := false
for i := range m.matchcap {
m.matchcap[i] = -1
}
+
+ i, _ := m.inputs.init(ir, ib, is)
+
r, r1 := endOfText, endOfText
width, width1 := 0, 0
r, width = i.step(pos)
if r != endOfText {
r1, width1 = i.step(pos + width)
}
- var flag syntax.EmptyOp
+ var flag lazyFlag
if pos == 0 {
- flag = syntax.EmptyOpContext(-1, r)
+ flag = newLazyFlag(-1, r)
} else {
flag = i.context(pos)
}
- pc := m.op.Start
- inst := m.op.Inst[pc]
+ pc := re.onepass.Start
+ inst := re.onepass.Inst[pc]
// If there is a simple literal prefix, skip over it.
- if pos == 0 && syntax.EmptyOp(inst.Arg)&^flag == 0 &&
- len(m.re.prefix) > 0 && i.canCheckPrefix() {
+ if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) &&
+ len(re.prefix) > 0 && i.canCheckPrefix() {
// Match requires literal prefix; fast search for it.
- if !i.hasPrefix(m.re) {
- return m.matched
+ if !i.hasPrefix(re) {
+ goto Return
}
- pos += len(m.re.prefix)
+ pos += len(re.prefix)
r, width = i.step(pos)
r1, width1 = i.step(pos + width)
flag = i.context(pos)
- pc = int(m.re.prefixEnd)
+ pc = int(re.prefixEnd)
}
for {
- inst = m.op.Inst[pc]
+ inst = re.onepass.Inst[pc]
pc = int(inst.Out)
switch inst.Op {
default:
panic("bad inst")
case syntax.InstMatch:
- m.matched = true
+ matched = true
if len(m.matchcap) > 0 {
m.matchcap[0] = 0
m.matchcap[1] = pos
}
- return m.matched
+ goto Return
case syntax.InstRune:
if !inst.MatchRune(r) {
- return m.matched
+ goto Return
}
case syntax.InstRune1:
if r != inst.Rune[0] {
- return m.matched
+ goto Return
}
case syntax.InstRuneAny:
// Nothing
case syntax.InstRuneAnyNotNL:
if r == '\n' {
- return m.matched
+ goto Return
}
// peek at the input rune to see which branch of the Alt to take
case syntax.InstAlt, syntax.InstAltMatch:
pc = int(onePassNext(&inst, r))
continue
case syntax.InstFail:
- return m.matched
+ goto Return
case syntax.InstNop:
continue
case syntax.InstEmptyWidth:
- if syntax.EmptyOp(inst.Arg)&^flag != 0 {
- return m.matched
+ if !flag.match(syntax.EmptyOp(inst.Arg)) {
+ goto Return
}
continue
case syntax.InstCapture:
@@ -396,14 +490,23 @@ func (m *machine) onepass(i input, pos, ncap int) bool {
if width == 0 {
break
}
- flag = syntax.EmptyOpContext(r, r1)
+ flag = newLazyFlag(r, r1)
pos += width
r, width = r1, width1
if r != endOfText {
r1, width1 = i.step(pos + width)
}
}
- return m.matched
+
+Return:
+ if !matched {
+ freeOnePassMachine(m)
+ return nil
+ }
+
+ dstCap = append(dstCap, m.matchcap...)
+ freeOnePassMachine(m)
+ return dstCap
}
// doMatch reports whether r, b, or s match the regexp.
@@ -416,43 +519,28 @@ func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool {
//
// nil is returned if no matches are found and non-nil if matches are found.
func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int {
- m := re.get()
- var i input
- var size int
- if r != nil {
- i = m.newInputReader(r)
- } else if b != nil {
- i = m.newInputBytes(b)
- size = len(b)
- } else {
- i = m.newInputString(s)
- size = len(s)
- }
- if m.op != notOnePass {
- if !m.onepass(i, pos, ncap) {
- re.put(m)
- return nil
- }
- } else if size < m.maxBitStateLen && r == nil {
- if m.b == nil {
- m.b = newBitState(m.p)
- }
- if !m.backtrack(i, pos, size, ncap) {
- re.put(m)
- return nil
- }
- } else {
- m.init(ncap)
- if !m.match(i, pos) {
- re.put(m)
- return nil
- }
- }
- dstCap = append(dstCap, m.matchcap...)
if dstCap == nil {
- // Keep the promise of returning non-nil value on match.
- dstCap = arrayNoInts[:0]
+ // Make sure 'return dstCap' is non-nil.
+ dstCap = arrayNoInts[:0:0]
}
+
+ if re.onepass != nil {
+ return re.doOnePass(r, b, s, pos, ncap, dstCap)
+ }
+ if r == nil && len(b)+len(s) < re.maxBitStateLen {
+ return re.backtrack(b, s, pos, ncap, dstCap)
+ }
+
+ m := re.get()
+ i, _ := m.inputs.init(r, b, s)
+
+ m.init(ncap)
+ if !m.match(i, pos) {
+ re.put(m)
+ return nil
+ }
+
+ dstCap = append(dstCap, m.matchcap...)
re.put(m)
return dstCap
}
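
A note on the lazyFlag representation above: the two runes surrounding a
position are packed into a single word, and the empty-width flags they imply
are computed only if an InstEmptyWidth instruction is actually reached. The
following standalone sketch (not part of the patch) shows the encoding against
the public regexp/syntax API; it collapses the patch's flag-by-flag tests into
the eager EmptyOpContext, so it demonstrates the packing rather than the full
short-circuit logic.

    package main

    import (
        "fmt"
        "regexp/syntax"
    )

    // lazyFlag packs the rune before and the rune after a position
    // into one word; -1 stands for beginning or end of text.
    type lazyFlag uint64

    func newLazyFlag(r1, r2 rune) lazyFlag {
        return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2)))
    }

    // match decodes the pair and defers to the eager EmptyOpContext;
    // the patch instead peels off one flag at a time so it can stop early.
    func (f lazyFlag) match(op syntax.EmptyOp) bool {
        if op == 0 {
            return true
        }
        r1, r2 := rune(f>>32), rune(uint32(f))
        return syntax.EmptyOpContext(r1, r2)&op == op
    }

    func main() {
        f := newLazyFlag(-1, 'a') // start of text, next rune 'a'
        fmt.Println(f.match(syntax.EmptyBeginText)) // true
        fmt.Println(f.match(syntax.EmptyEndText))   // false
    }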
diff --git a/src/regexp/exec_test.go b/src/regexp/exec_test.go
index 5f8e747b17..1489219328 100644
--- a/src/regexp/exec_test.go
+++ b/src/regexp/exec_test.go
@@ -684,7 +684,7 @@ func BenchmarkMatch(b *testing.B) {
func BenchmarkMatch_onepass_regex(b *testing.B) {
isRaceBuilder := strings.HasSuffix(testenv.Builder(), "-race")
r := MustCompile(`(?s)\A.*\z`)
- if r.get().op == notOnePass {
+ if r.onepass == nil {
b.Fatalf("want onepass regex, but %q is not onepass", r)
}
for _, size := range benchSizes {
@@ -692,18 +692,12 @@ func BenchmarkMatch_onepass_regex(b *testing.B) {
continue
}
t := makeText(size.n)
- bs := make([][]byte, len(t))
- for i, s := range t {
- bs[i] = []byte{s}
- }
b.Run(size.name, func(b *testing.B) {
b.SetBytes(int64(size.n))
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- for _, byts := range bs {
- if !r.Match(byts) {
- b.Fatal("not match!")
- }
+ if !r.Match(t) {
+ b.Fatal("not match!")
}
}
})
diff --git a/src/regexp/onepass.go b/src/regexp/onepass.go
index 125be59a7d..2f3ce6f9f6 100644
--- a/src/regexp/onepass.go
+++ b/src/regexp/onepass.go
@@ -294,12 +294,12 @@ var anyRune = []rune{0, unicode.MaxRune}
// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt,
// the match engine can always tell which branch to take. The routine may modify
// p if it is turned into a onepass Prog. If it isn't possible for this to be a
-// onepass Prog, the Prog notOnePass is returned. makeOnePass is recursive
+// onepass Prog, nil is returned. makeOnePass is recursive
// to the size of the Prog.
func makeOnePass(p *onePassProg) *onePassProg {
// If the machine is very long, it's not worth the time to check if we can use one pass.
if len(p.Inst) >= 1000 {
- return notOnePass
+ return nil
}
var (
@@ -446,11 +446,11 @@ func makeOnePass(p *onePassProg) *onePassProg {
visitQueue.clear()
pc := instQueue.next()
if !check(pc, m) {
- p = notOnePass
+ p = nil
break
}
}
- if p != notOnePass {
+ if p != nil {
for i := range p.Inst {
p.Inst[i].Rune = onePassRunes[i]
}
@@ -458,20 +458,18 @@ func makeOnePass(p *onePassProg) *onePassProg {
return p
}
-var notOnePass *onePassProg = nil
-
// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog
-// can be recharacterized as a one-pass regexp program, or syntax.notOnePass if the
+// can be recharacterized as a one-pass regexp program, or nil if the
// Prog cannot be converted. For a one pass prog, the fundamental condition that must
// be true is: at any InstAlt, there must be no ambiguity about what branch to take.
func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
if prog.Start == 0 {
- return notOnePass
+ return nil
}
// onepass regexp is anchored
if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth ||
syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText {
- return notOnePass
+ return nil
}
// every instruction leading to InstMatch must be EmptyEndText
for _, inst := range prog.Inst {
@@ -479,18 +477,18 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
switch inst.Op {
default:
if opOut == syntax.InstMatch {
- return notOnePass
+ return nil
}
case syntax.InstAlt, syntax.InstAltMatch:
if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch {
- return notOnePass
+ return nil
}
case syntax.InstEmptyWidth:
if opOut == syntax.InstMatch {
if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText {
continue
}
- return notOnePass
+ return nil
}
}
}
@@ -501,7 +499,7 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
// checkAmbiguity on InstAlts, build onepass Prog if possible
p = makeOnePass(p)
- if p != notOnePass {
+ if p != nil {
cleanupOnePass(p, prog)
}
return p
diff --git a/src/regexp/onepass_test.go b/src/regexp/onepass_test.go
index b1caa44515..a0f2e39048 100644
--- a/src/regexp/onepass_test.go
+++ b/src/regexp/onepass_test.go
@@ -134,47 +134,45 @@ func TestMergeRuneSet(t *testing.T) {
}
}
-var onePass = &onePassProg{}
-
var onePassTests = []struct {
- re string
- onePass *onePassProg
+ re string
+ isOnePass bool
}{
- {`^(?:a|(?:a*))$`, notOnePass},
- {`^(?:(a)|(?:a*))$`, notOnePass},
- {`^(?:(?:(?:.(?:$))?))$`, onePass},
- {`^abcd$`, onePass},
- {`^(?:(?:a{0,})*?)$`, onePass},
- {`^(?:(?:a+)*)$`, onePass},
- {`^(?:(?:a|(?:aa)))$`, onePass},
- {`^(?:[^\s\S])$`, onePass},
- {`^(?:(?:a{3,4}){0,})$`, notOnePass},
- {`^(?:(?:(?:a*)+))$`, onePass},
- {`^[a-c]+$`, onePass},
- {`^[a-c]*$`, onePass},
- {`^(?:a*)$`, onePass},
- {`^(?:(?:aa)|a)$`, onePass},
- {`^[a-c]*`, notOnePass},
- {`^...$`, onePass},
- {`^(?:a|(?:aa))$`, onePass},
- {`^a((b))c$`, onePass},
- {`^a.[l-nA-Cg-j]?e$`, onePass},
- {`^a((b))$`, onePass},
- {`^a(?:(b)|(c))c$`, onePass},
- {`^a(?:(b*)|(c))c$`, notOnePass},
- {`^a(?:b|c)$`, onePass},
- {`^a(?:b?|c)$`, onePass},
- {`^a(?:b?|c?)$`, notOnePass},
- {`^a(?:b?|c+)$`, onePass},
- {`^a(?:b+|(bc))d$`, notOnePass},
- {`^a(?:bc)+$`, onePass},
- {`^a(?:[bcd])+$`, onePass},
- {`^a((?:[bcd])+)$`, onePass},
- {`^a(:?b|c)*d$`, onePass},
- {`^.bc(d|e)*$`, onePass},
- {`^(?:(?:aa)|.)$`, notOnePass},
- {`^(?:(?:a{1,2}){1,2})$`, notOnePass},
- {`^l` + strings.Repeat("o", 2<<8) + `ng$`, onePass},
+ {`^(?:a|(?:a*))$`, false},
+ {`^(?:(a)|(?:a*))$`, false},
+ {`^(?:(?:(?:.(?:$))?))$`, true},
+ {`^abcd$`, true},
+ {`^(?:(?:a{0,})*?)$`, true},
+ {`^(?:(?:a+)*)$`, true},
+ {`^(?:(?:a|(?:aa)))$`, true},
+ {`^(?:[^\s\S])$`, true},
+ {`^(?:(?:a{3,4}){0,})$`, false},
+ {`^(?:(?:(?:a*)+))$`, true},
+ {`^[a-c]+$`, true},
+ {`^[a-c]*$`, true},
+ {`^(?:a*)$`, true},
+ {`^(?:(?:aa)|a)$`, true},
+ {`^[a-c]*`, false},
+ {`^...$`, true},
+ {`^(?:a|(?:aa))$`, true},
+ {`^a((b))c$`, true},
+ {`^a.[l-nA-Cg-j]?e$`, true},
+ {`^a((b))$`, true},
+ {`^a(?:(b)|(c))c$`, true},
+ {`^a(?:(b*)|(c))c$`, false},
+ {`^a(?:b|c)$`, true},
+ {`^a(?:b?|c)$`, true},
+ {`^a(?:b?|c?)$`, false},
+ {`^a(?:b?|c+)$`, true},
+ {`^a(?:b+|(bc))d$`, false},
+ {`^a(?:bc)+$`, true},
+ {`^a(?:[bcd])+$`, true},
+ {`^a((?:[bcd])+)$`, true},
+ {`^a(:?b|c)*d$`, true},
+ {`^.bc(d|e)*$`, true},
+ {`^(?:(?:aa)|.)$`, false},
+ {`^(?:(?:a{1,2}){1,2})$`, false},
+ {`^l` + strings.Repeat("o", 2<<8) + `ng$`, true},
}
func TestCompileOnePass(t *testing.T) {
@@ -194,9 +192,9 @@ func TestCompileOnePass(t *testing.T) {
t.Errorf("Compile(%q) got err:%s, want success", test.re, err)
continue
}
- onePass = compileOnePass(p)
- if (onePass == notOnePass) != (test.onePass == notOnePass) {
- t.Errorf("CompileOnePass(%q) got %v, expected %v", test.re, onePass, test.onePass)
+ isOnePass := compileOnePass(p) != nil
+ if isOnePass != test.isOnePass {
+ t.Errorf("CompileOnePass(%q) got isOnePass=%v, expected %v", test.re, isOnePass, test.isOnePass)
}
}
}
@@ -216,8 +214,8 @@ func TestRunOnePass(t *testing.T) {
t.Errorf("Compile(%q): got err: %s", test.re, err)
continue
}
- if re.onepass == notOnePass {
- t.Errorf("Compile(%q): got notOnePass, want one-pass", test.re)
+ if re.onepass == nil {
+ t.Errorf("Compile(%q): got nil, want one-pass", test.re)
continue
}
if !re.MatchString(test.match) {
@@ -227,21 +225,11 @@ func TestRunOnePass(t *testing.T) {
}
func BenchmarkCompileOnepass(b *testing.B) {
- for _, test := range onePassTests {
- if test.onePass == notOnePass {
- continue
+ b.ReportAllocs()
+ const re = `^a.[l-nA-Cg-j]?e$`
+ for i := 0; i < b.N; i++ {
+ if _, err := Compile(re); err != nil {
+ b.Fatal(err)
}
- name := test.re
- if len(name) > 20 {
- name = name[:20] + "..."
- }
- b.Run(name, func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- if _, err := Compile(test.re); err != nil {
- b.Fatal(err)
- }
- }
- })
}
}
diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go
index 89bb975ac1..38b3c86d9f 100644
--- a/src/regexp/regexp.go
+++ b/src/regexp/regexp.go
@@ -79,27 +79,24 @@ import (
// A Regexp is safe for concurrent use by multiple goroutines,
// except for configuration methods, such as Longest.
type Regexp struct {
- // read-only after Compile
- regexpRO
-
- // cache of machines for running regexp
- mu sync.Mutex
- machine []*machine
-}
-
-type regexpRO struct {
- expr string // as passed to Compile
- prog *syntax.Prog // compiled program
- onepass *onePassProg // onepass program or nil
+ expr string // as passed to Compile
+ prog *syntax.Prog // compiled program
+ onepass *onePassProg // onepass program or nil
+ numSubexp int
+ maxBitStateLen int
+ subexpNames []string
prefix string // required prefix in unanchored matches
prefixBytes []byte // prefix, as a []byte
- prefixComplete bool // prefix is the entire regexp
prefixRune rune // first rune in prefix
prefixEnd uint32 // pc for last rune in prefix
+ mpool int // pool for machines
+ matchcap int // size of recorded match lengths
+ prefixComplete bool // prefix is the entire regexp
cond syntax.EmptyOp // empty-width conditions required at start of match
- numSubexp int
- subexpNames []string
- longest bool
+
+ // This field can be modified by the Longest method,
+ // but it is otherwise read-only.
+ longest bool // whether regexp prefers leftmost-longest match
}
// String returns the source text used to compile the regular expression.
@@ -108,15 +105,16 @@ func (re *Regexp) String() string {
}
// Copy returns a new Regexp object copied from re.
+// Calling Longest on one copy does not affect another.
//
-// When using a Regexp in multiple goroutines, giving each goroutine
-// its own copy helps to avoid lock contention.
+// Deprecated: In earlier releases, when using a Regexp in multiple goroutines,
+// giving each goroutine its own copy helped to avoid lock contention.
+// As of Go 1.12, using Copy is no longer necessary to avoid lock contention.
+// Copy may still be appropriate if the reason for its use is to make
+// two copies with different Longest settings.
func (re *Regexp) Copy() *Regexp {
- // It is not safe to copy Regexp by value
- // since it contains a sync.Mutex.
- return &Regexp{
- regexpRO: re.regexpRO,
- }
+ re2 := *re
+ return &re2
}
// Compile parses a regular expression and returns, if successful,
@@ -179,19 +177,23 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) {
if err != nil {
return nil, err
}
- regexp := &Regexp{
- regexpRO: regexpRO{
- expr: expr,
- prog: prog,
- onepass: compileOnePass(prog),
- numSubexp: maxCap,
- subexpNames: capNames,
- cond: prog.StartCond(),
- longest: longest,
- },
+ matchcap := prog.NumCap
+ if matchcap < 2 {
+ matchcap = 2
}
- if regexp.onepass == notOnePass {
+ regexp := &Regexp{
+ expr: expr,
+ prog: prog,
+ onepass: compileOnePass(prog),
+ numSubexp: maxCap,
+ subexpNames: capNames,
+ cond: prog.StartCond(),
+ longest: longest,
+ matchcap: matchcap,
+ }
+ if regexp.onepass == nil {
regexp.prefix, regexp.prefixComplete = prog.Prefix()
+ regexp.maxBitStateLen = maxBitStateLen(prog)
} else {
regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog)
}
@@ -201,39 +203,64 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) {
regexp.prefixBytes = []byte(regexp.prefix)
regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix)
}
+
+ n := len(prog.Inst)
+ i := 0
+ for matchSize[i] != 0 && matchSize[i] < n {
+ i++
+ }
+ regexp.mpool = i
+
return regexp, nil
}
+// Pools of *machine for use during (*Regexp).doExecute,
+// split up by the size of the execution queues.
+// matchPool[i] machines have queue size matchSize[i].
+// On a 64-bit system each queue entry is 16 bytes,
+// so matchPool[0] has 16*2*128 = 4kB queues, etc.
+// The final matchPool is a catch-all for very large queues.
+var (
+ matchSize = [...]int{128, 512, 2048, 16384, 0}
+ matchPool [len(matchSize)]sync.Pool
+)
+
// get returns a machine to use for matching re.
// It uses the re's machine cache if possible, to avoid
// unnecessary allocation.
func (re *Regexp) get() *machine {
- re.mu.Lock()
- if n := len(re.machine); n > 0 {
- z := re.machine[n-1]
- re.machine = re.machine[:n-1]
- re.mu.Unlock()
- return z
+ m, ok := matchPool[re.mpool].Get().(*machine)
+ if !ok {
+ m = new(machine)
}
- re.mu.Unlock()
- z := progMachine(re.prog, re.onepass)
- z.re = re
- return z
+ m.re = re
+ m.p = re.prog
+ if cap(m.matchcap) < re.matchcap {
+ m.matchcap = make([]int, re.matchcap)
+ for _, t := range m.pool {
+ t.cap = make([]int, re.matchcap)
+ }
+ }
+
+ // Allocate queues if needed.
+ // Or reallocate, for "large" match pool.
+ n := matchSize[re.mpool]
+ if n == 0 { // large pool
+ n = len(re.prog.Inst)
+ }
+ if len(m.q0.sparse) < n {
+ m.q0 = queue{make([]uint32, n), make([]entry, 0, n)}
+ m.q1 = queue{make([]uint32, n), make([]entry, 0, n)}
+ }
+ return m
}
-// put returns a machine to the re's machine cache.
-// There is no attempt to limit the size of the cache, so it will
-// grow to the maximum number of simultaneous matches
-// run using re. (The cache empties when re gets garbage collected.)
-func (re *Regexp) put(z *machine) {
- // Remove references to input data that we no longer need.
- z.inputBytes.str = nil
- z.inputString.str = ""
- z.inputReader.r = nil
-
- re.mu.Lock()
- re.machine = append(re.machine, z)
- re.mu.Unlock()
+// put returns a machine to the correct machine pool.
+func (re *Regexp) put(m *machine) {
+ m.re = nil
+ m.p = nil
+ m.inputs.clear()
+ matchPool[re.mpool].Put(m)
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
@@ -288,7 +315,7 @@ type input interface {
canCheckPrefix() bool // can we look ahead without losing info?
hasPrefix(re *Regexp) bool
index(re *Regexp, pos int) int
- context(pos int) syntax.EmptyOp
+ context(pos int) lazyFlag
}
// inputString scans a string.
@@ -319,7 +346,7 @@ func (i *inputString) index(re *Regexp, pos int) int {
return strings.Index(i.str[pos:], re.prefix)
}
-func (i *inputString) context(pos int) syntax.EmptyOp {
+func (i *inputString) context(pos int) lazyFlag {
r1, r2 := endOfText, endOfText
// 0 < pos && pos <= len(i.str)
if uint(pos-1) < uint(len(i.str)) {
@@ -335,7 +362,7 @@ func (i *inputString) context(pos int) syntax.EmptyOp {
r2, _ = utf8.DecodeRuneInString(i.str[pos:])
}
}
- return syntax.EmptyOpContext(r1, r2)
+ return newLazyFlag(r1, r2)
}
// inputBytes scans a byte slice.
@@ -366,7 +393,7 @@ func (i *inputBytes) index(re *Regexp, pos int) int {
return bytes.Index(i.str[pos:], re.prefixBytes)
}
-func (i *inputBytes) context(pos int) syntax.EmptyOp {
+func (i *inputBytes) context(pos int) lazyFlag {
r1, r2 := endOfText, endOfText
// 0 < pos && pos <= len(i.str)
if uint(pos-1) < uint(len(i.str)) {
@@ -382,7 +409,7 @@ func (i *inputBytes) context(pos int) syntax.EmptyOp {
r2, _ = utf8.DecodeRune(i.str[pos:])
}
}
- return syntax.EmptyOpContext(r1, r2)
+ return newLazyFlag(r1, r2)
}
// inputReader scans a RuneReader.
@@ -418,8 +445,8 @@ func (i *inputReader) index(re *Regexp, pos int) int {
return -1
}
-func (i *inputReader) context(pos int) syntax.EmptyOp {
- return 0
+func (i *inputReader) context(pos int) lazyFlag {
+ return 0 // not used
}
// LiteralPrefix returns a literal string that must begin any match
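
The matchPool/matchSize scheme above replaces the per-Regexp mutex-guarded
machine list with global sync.Pools bucketed by queue capacity, so machines
are shared across all regexps of similar program size and can be reclaimed by
the GC. A minimal sketch of the same bucketing follows; machine, classFor,
get, and put are illustrative names, not the patch's API.

    package main

    import (
        "fmt"
        "sync"
    )

    type machine struct{ q []uint32 } // stand-in for the real machine's queues

    // sizeClasses mirrors matchSize: ascending capacities, with a
    // trailing 0 as the catch-all class for very large programs.
    var (
        sizeClasses = [...]int{128, 512, 2048, 16384, 0}
        pools       [len(sizeClasses)]sync.Pool
    )

    // classFor picks the first class that fits n instructions, the same
    // loop compile runs once and caches in re.mpool.
    func classFor(n int) int {
        i := 0
        for sizeClasses[i] != 0 && sizeClasses[i] < n {
            i++
        }
        return i
    }

    func get(n int) *machine {
        c := classFor(n)
        m, ok := pools[c].Get().(*machine)
        if !ok {
            m = new(machine)
        }
        want := sizeClasses[c]
        if want == 0 { // catch-all class: size to the actual request
            want = n
        }
        if cap(m.q) < want {
            m.q = make([]uint32, want)
        }
        return m
    }

    func put(n int, m *machine) { pools[classFor(n)].Put(m) }

    func main() {
        m := get(1000)
        fmt.Println(cap(m.q)) // 2048: the first class that fits 1000
        put(1000, m)
    }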
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index b180cb06ab..87076817f9 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -140,7 +140,9 @@ TEXT runtime·gogo(SB), NOSPLIT, $16-8
MOVD 0(g), R4
MOVD gobuf_sp(R5), R1
MOVD gobuf_lr(R5), R31
+#ifndef GOOS_aix
MOVD 24(R1), R2 // restore R2
+#endif
MOVD R31, LR
MOVD gobuf_ret(R5), R3
MOVD gobuf_ctxt(R5), R11
@@ -257,7 +259,9 @@ switch:
MOVD g_m(g), R3
MOVD m_curg(R3), g
MOVD (g_sched+gobuf_sp)(g), R3
+#ifndef GOOS_aix
MOVD 24(R3), R2
+#endif
// switch back to g
MOVD g_m(g), R3
MOVD m_curg(R3), g
@@ -274,7 +278,9 @@ noswitch:
MOVD 0(R11), R12 // code pointer
MOVD R12, CTR
BL (CTR)
+#ifndef GOOS_aix
MOVD 24(R1), R2
+#endif
RET
/*
@@ -422,11 +428,20 @@ tail: \
callfn: \
/* call function */ \
MOVD f+8(FP), R11; \
+#ifdef GOOS_aix \
+ /* AIX won't trigger a SIGSEGV if R11 = nil, */ \
+ /* so trigger the fault manually. */ \
+ CMP R0, R11 \
+ BNE 2(PC) \
+ MOVD R0, 0(R0) \
+#endif \
MOVD (R11), R12; \
MOVD R12, CTR; \
PCDATA $PCDATA_StackMapIndex, $0; \
BL (CTR); \
+#ifndef GOOS_aix \
MOVD 24(R1), R2; \
+#endif \
/* copy return values back */ \
MOVD argtype+0(FP), R7; \
MOVD arg+16(FP), R3; \
@@ -504,12 +519,22 @@ again:
// the BL deferreturn and jmpdefer rewinds to that.
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
MOVD 0(R1), R31
+#ifdef GOOS_aix
+ MOVD 16(R31), R31 // caller LR is on the previous stack frame on AIX
+#endif
SUB $8, R31
MOVD R31, LR
MOVD fv+0(FP), R11
MOVD argp+8(FP), R1
SUB $FIXED_FRAME, R1
+#ifdef GOOS_aix
+ // AIX won't trigger a SIGSEGV if R11 = nil,
+ // so trigger the fault manually.
+ CMP R0, R11
+ BNE 2(PC)
+ MOVD R0, 0(R0)
+#endif
MOVD 0(R11), R12
MOVD R12, CTR
BR (CTR)
@@ -542,8 +567,13 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// Figure out if we need to switch to m->g0 stack.
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already.
- MOVD g_m(g), R6
- MOVD m_g0(R6), R6
+ // Moreover, if it's called inside a signal handler, it must not switch
+ // to g0, as g0 may already be in use by another syscall.
+ MOVD g_m(g), R8
+ MOVD m_gsignal(R8), R6
+ CMP R6, g
+ BEQ g0
+ MOVD m_g0(R8), R6
CMP R6, g
BEQ g0
BL gosave<>(SB)
@@ -555,13 +585,22 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
g0:
// Save room for two of our pointers, plus 32 bytes of callee
// save area that lives on the caller stack.
+#ifdef GOOS_aix
+ // Create a fake LR to improve backtrace.
+ MOVD $runtime·asmcgocall(SB), R6
+ MOVD R6, 16(R1)
+#endif
SUB $48, R1
RLDCR $0, R1, $~15, R1 // 16-byte alignment for gcc ABI
MOVD R5, 40(R1) // save old g on stack
MOVD (g_stack+stack_hi)(R5), R5
SUB R7, R5
MOVD R5, 32(R1) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
+#ifdef GOOS_aix
+ MOVD R7, 0(R1) // Save frame pointer to allow manual backtrace with gdb
+#else
MOVD R0, 0(R1) // clear back chain pointer (TODO can we give it real back trace information?)
+#endif
// This is a "global call", so put the global entry point in r12
MOVD R3, R12
MOVD R12, CTR
@@ -574,15 +613,14 @@ g0:
// Restore g, stack pointer, toc pointer.
// R3 is errno, so don't touch it
MOVD 40(R1), g
- MOVD (g_stack+stack_hi)(g), R5
- MOVD 32(R1), R6
- SUB R6, R5
- MOVD 24(R5), R2
- BL runtime·save_g(SB)
MOVD (g_stack+stack_hi)(g), R5
MOVD 32(R1), R6
SUB R6, R5
+#ifndef GOOS_aix
+ MOVD 24(R5), R2
+#endif
MOVD R5, R1
+ BL runtime·save_g(SB)
MOVW R3, ret+16(FP)
RET
@@ -610,7 +648,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$16-32
NO_LOCAL_POINTERS
// Load m and g from thread-local storage.
- MOVB runtime·iscgo(SB), R3
+ MOVBZ runtime·iscgo(SB), R3
CMP R3, $0
BEQ nocgo
BL runtime·load_g(SB)
diff --git a/src/runtime/cgo/gcc_libinit.c b/src/runtime/cgo/gcc_libinit.c
index 3dc5bde4cc..3dafd10b7b 100644
--- a/src/runtime/cgo/gcc_libinit.c
+++ b/src/runtime/cgo/gcc_libinit.c
@@ -63,7 +63,7 @@ _cgo_wait_runtime_init_done() {
}
void
-x_cgo_notify_runtime_init_done(void* dummy) {
+x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) {
pthread_mutex_lock(&runtime_init_mu);
runtime_init_done = 1;
pthread_cond_broadcast(&runtime_init_cond);
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 5cf0b86f58..8662f00e13 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -19,6 +19,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/math"
"unsafe"
)
@@ -78,7 +79,8 @@ func makechan(t *chantype, size int) *hchan {
throw("makechan: bad alignment")
}
- if size < 0 || uintptr(size) > maxSliceCap(elem.size) || uintptr(size)*elem.size > maxAlloc-hchanSize {
+ mem, overflow := math.MulUintptr(elem.size, uintptr(size))
+ if overflow || mem > maxAlloc-hchanSize || size < 0 {
panic(plainError("makechan: size out of range"))
}
@@ -88,7 +90,7 @@ func makechan(t *chantype, size int) *hchan {
// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
var c *hchan
switch {
- case size == 0 || elem.size == 0:
+ case mem == 0:
// Queue or element size is zero.
c = (*hchan)(mallocgc(hchanSize, nil, true))
// Race detector uses this location for synchronization.
@@ -96,12 +98,12 @@ func makechan(t *chantype, size int) *hchan {
case elem.kind&kindNoPointers != 0:
// Elements do not contain pointers.
// Allocate hchan and buf in one call.
- c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
+ c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
c.buf = add(unsafe.Pointer(c), hchanSize)
default:
// Elements contain pointers.
c = new(hchan)
- c.buf = mallocgc(uintptr(size)*elem.size, elem, true)
+ c.buf = mallocgc(mem, elem, true)
}
c.elemsize = uint16(elem.size)
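
The makechan change is one instance of a pattern this series applies to
several allocation sites: compute the byte size with an overflow-aware
multiply, then reject overflow, oversized buffers, and negative counts in a
single comparison chain. A runnable sketch follows, assuming a 64-bit
platform; maxAlloc here is illustrative, and mulUintptr is inlined because
runtime/internal/math cannot be imported from user code.

    package main

    import (
        "fmt"
        "unsafe"
    )

    const (
        ptrSize  = unsafe.Sizeof(uintptr(0)) // bytes: 8 on 64-bit
        maxAlloc = 1 << 47                   // illustrative 64-bit limit
    )

    // mulUintptr reproduces math.MulUintptr: if both operands fit in
    // half a uintptr, the product cannot overflow.
    func mulUintptr(a, b uintptr) (uintptr, bool) {
        if a|b < 1<<(4*ptrSize) || a == 0 {
            return a * b, false
        }
        return a * b, b > ^uintptr(0)/a
    }

    // chanSize mirrors makechan's guard. A negative size is usually
    // caught as a huge unsigned product; the explicit size < 0 test
    // covers zero-sized elements, where the product is 0.
    func chanSize(elemSize uintptr, size int) (uintptr, error) {
        mem, overflow := mulUintptr(elemSize, uintptr(size))
        if overflow || mem > maxAlloc || size < 0 {
            return 0, fmt.Errorf("makechan: size out of range")
        }
        return mem, nil
    }

    func main() {
        fmt.Println(chanSize(8, 10))        // 80 <nil>
        fmt.Println(chanSize(8, -1))        // 0 makechan: size out of range
        fmt.Println(chanSize(1<<40, 1<<30)) // 0 makechan: size out of range
    }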
diff --git a/src/runtime/defs_freebsd.go b/src/runtime/defs_freebsd.go
index 29a6ec20a5..53c1508eb7 100644
--- a/src/runtime/defs_freebsd.go
+++ b/src/runtime/defs_freebsd.go
@@ -19,6 +19,7 @@ package runtime
#include
#include
#include
+#define _WANT_FREEBSD11_KEVENT 1
#include <sys/event.h>
#include
#include
@@ -149,7 +150,7 @@ type Itimerval C.struct_itimerval
type Umtx_time C.struct__umtx_time
-type Kevent C.struct_kevent
+type Kevent C.struct_kevent_freebsd11
type bintime C.struct_bintime
type vdsoTimehands C.struct_vdso_timehands
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index 7ab731151e..1ef9825a48 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -329,38 +329,27 @@ func convT2E64(t *_type, val uint64) (e eface) {
return
}
-func convT2Estring(t *_type, elem unsafe.Pointer) (e eface) {
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Estring))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
+func convT2Estring(t *_type, val string) (e eface) {
var x unsafe.Pointer
- if *(*string)(elem) == "" {
+ if val == "" {
x = unsafe.Pointer(&zeroVal[0])
} else {
- x = mallocgc(t.size, t, true)
- *(*string)(x) = *(*string)(elem)
+ x = mallocgc(unsafe.Sizeof(val), t, true)
+ *(*string)(x) = val
}
e._type = t
e.data = x
return
}
-func convT2Eslice(t *_type, elem unsafe.Pointer) (e eface) {
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Eslice))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
+func convT2Eslice(t *_type, val []byte) (e eface) {
+ // Note: this must work for any element type, not just byte.
var x unsafe.Pointer
- if v := *(*slice)(elem); uintptr(v.array) == 0 {
+ if (*slice)(unsafe.Pointer(&val)).array == nil {
x = unsafe.Pointer(&zeroVal[0])
} else {
- x = mallocgc(t.size, t, true)
- *(*slice)(x) = *(*slice)(elem)
+ x = mallocgc(unsafe.Sizeof(val), t, true)
+ *(*[]byte)(x) = val
}
e._type = t
e.data = x
@@ -438,40 +427,29 @@ func convT2I64(tab *itab, val uint64) (i iface) {
return
}
-func convT2Istring(tab *itab, elem unsafe.Pointer) (i iface) {
+func convT2Istring(tab *itab, val string) (i iface) {
t := tab._type
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Istring))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
var x unsafe.Pointer
- if *(*string)(elem) == "" {
+ if val == "" {
x = unsafe.Pointer(&zeroVal[0])
} else {
- x = mallocgc(t.size, t, true)
- *(*string)(x) = *(*string)(elem)
+ x = mallocgc(unsafe.Sizeof(val), t, true)
+ *(*string)(x) = val
}
i.tab = tab
i.data = x
return
}
-func convT2Islice(tab *itab, elem unsafe.Pointer) (i iface) {
+func convT2Islice(tab *itab, val []byte) (i iface) {
+ // Note: this must work for any element type, not just byte.
t := tab._type
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Islice))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
var x unsafe.Pointer
- if v := *(*slice)(elem); uintptr(v.array) == 0 {
+ if (*slice)(unsafe.Pointer(&val)).array == nil {
x = unsafe.Pointer(&zeroVal[0])
} else {
- x = mallocgc(t.size, t, true)
- *(*slice)(x) = *(*slice)(elem)
+ x = mallocgc(unsafe.Sizeof(val), t, true)
+ *(*[]byte)(x) = val
}
i.tab = tab
i.data = x
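
The rewritten convT2Estring and friends take their operand by value and keep
pointing empty values at the shared zeroVal block, so boxing a zero string or
nil slice performs no allocation. That effect is observable from user code; a
small check follows (the printed values are what the optimization implies, not
something the patch itself asserts).

    package main

    import (
        "fmt"
        "testing"
    )

    var sink interface{}

    //go:noinline
    func box(s string) interface{} { return s } // forces a runtime conversion

    func main() {
        empty, nonEmpty := "", "x"
        a0 := testing.AllocsPerRun(100, func() { sink = box(empty) })
        a1 := testing.AllocsPerRun(100, func() { sink = box(nonEmpty) })
        // "" shares the runtime's static zero value, so boxing it does not
        // allocate; a non-empty string copies its header into one small
        // heap object.
        fmt.Println(a0, a1) // expected: 0 1
    }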
diff --git a/src/runtime/internal/atomic/asm_386.s b/src/runtime/internal/atomic/asm_386.s
index 86a3ef33b9..13289a88d0 100644
--- a/src/runtime/internal/atomic/asm_386.s
+++ b/src/runtime/internal/atomic/asm_386.s
@@ -23,6 +23,9 @@ TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
JMP runtime∕internal∕atomic·Cas(SB)
+TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-13
+ JMP runtime∕internal∕atomic·Cas(SB)
+
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
JMP runtime∕internal∕atomic·Load(SB)
@@ -180,6 +183,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
XCHGL AX, 0(BX)
RET
+TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Store(SB)
+
// uint64 atomicload64(uint64 volatile* addr);
TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
diff --git a/src/runtime/internal/atomic/asm_amd64.s b/src/runtime/internal/atomic/asm_amd64.s
index 6fb5211c9c..e18aee7d59 100644
--- a/src/runtime/internal/atomic/asm_amd64.s
+++ b/src/runtime/internal/atomic/asm_amd64.s
@@ -43,6 +43,9 @@ TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
JMP runtime∕internal∕atomic·Cas64(SB)
+TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
+ JMP runtime∕internal∕atomic·Cas(SB)
+
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16
JMP runtime∕internal∕atomic·Load64(SB)
@@ -130,6 +133,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
XCHGL AX, 0(BX)
RET
+TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Store(SB)
+
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
diff --git a/src/runtime/internal/atomic/asm_amd64p32.s b/src/runtime/internal/atomic/asm_amd64p32.s
index ff590e601b..35b5ef205e 100644
--- a/src/runtime/internal/atomic/asm_amd64p32.s
+++ b/src/runtime/internal/atomic/asm_amd64p32.s
@@ -23,6 +23,9 @@ TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-17
JMP runtime∕internal∕atomic·Cas(SB)
+TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
+ JMP runtime∕internal∕atomic·Cas(SB)
+
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Load(SB)
@@ -130,6 +133,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
XCHGL AX, 0(BX)
RET
+TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Store(SB)
+
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVL ptr+0(FP), BX
MOVQ val+8(FP), AX
diff --git a/src/runtime/internal/atomic/asm_arm.s b/src/runtime/internal/atomic/asm_arm.s
index 09724c1c34..d4ef11560e 100644
--- a/src/runtime/internal/atomic/asm_arm.s
+++ b/src/runtime/internal/atomic/asm_arm.s
@@ -53,12 +53,18 @@ casfail:
TEXT runtime∕internal∕atomic·Loadp(SB),NOSPLIT|NOFRAME,$0-8
B runtime∕internal∕atomic·Load(SB)
+TEXT runtime∕internal∕atomic·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
+ B runtime∕internal∕atomic·Load(SB)
+
TEXT runtime∕internal∕atomic·Casuintptr(SB),NOSPLIT,$0-13
B runtime∕internal∕atomic·Cas(SB)
TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0-13
B runtime∕internal∕atomic·Cas(SB)
+TEXT runtime∕internal∕atomic·CasRel(SB),NOSPLIT,$0-13
+ B runtime∕internal∕atomic·Cas(SB)
+
TEXT runtime∕internal∕atomic·Loaduintptr(SB),NOSPLIT,$0-8
B runtime∕internal∕atomic·Load(SB)
@@ -71,6 +77,9 @@ TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8
TEXT runtime∕internal∕atomic·StorepNoWB(SB),NOSPLIT,$0-8
B runtime∕internal∕atomic·Store(SB)
+TEXT runtime∕internal∕atomic·StoreRel(SB),NOSPLIT,$0-8
+ B runtime∕internal∕atomic·Store(SB)
+
TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-12
B runtime∕internal∕atomic·Xadd(SB)
diff --git a/src/runtime/internal/atomic/asm_arm64.s b/src/runtime/internal/atomic/asm_arm64.s
index 56b89a5a0b..8336a859ad 100644
--- a/src/runtime/internal/atomic/asm_arm64.s
+++ b/src/runtime/internal/atomic/asm_arm64.s
@@ -29,6 +29,9 @@ ok:
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
B runtime∕internal∕atomic·Cas64(SB)
+TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
+ B runtime∕internal∕atomic·Cas(SB)
+
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16
B runtime∕internal∕atomic·Load64(SB)
diff --git a/src/runtime/internal/atomic/asm_mips64x.s b/src/runtime/internal/atomic/asm_mips64x.s
index 19d131e5a6..9cb10371b7 100644
--- a/src/runtime/internal/atomic/asm_mips64x.s
+++ b/src/runtime/internal/atomic/asm_mips64x.s
@@ -62,6 +62,9 @@ cas64_fail:
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
JMP ·Cas64(SB)
+TEXT ·CasRel(SB), NOSPLIT, $0-17
+ JMP ·Cas(SB)
+
TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
JMP ·Load64(SB)
@@ -152,6 +155,9 @@ TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
JMP ·Store64(SB)
+TEXT ·StoreRel(SB), NOSPLIT, $0-12
+ JMP ·Store(SB)
+
TEXT ·Store(SB), NOSPLIT, $0-12
MOVV ptr+0(FP), R1
MOVW val+8(FP), R2
diff --git a/src/runtime/internal/atomic/asm_mipsx.s b/src/runtime/internal/atomic/asm_mipsx.s
index 30550fd02e..73d7ea3ad4 100644
--- a/src/runtime/internal/atomic/asm_mipsx.s
+++ b/src/runtime/internal/atomic/asm_mipsx.s
@@ -70,6 +70,9 @@ try_xchg:
TEXT ·Casuintptr(SB),NOSPLIT,$0-13
JMP ·Cas(SB)
+TEXT ·CasRel(SB),NOSPLIT,$0-13
+ JMP ·Cas(SB)
+
TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
JMP ·Load(SB)
@@ -100,6 +103,9 @@ TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
JMP ·Store(SB)
+TEXT ·StoreRel(SB),NOSPLIT,$0-8
+ JMP ·Store(SB)
+
// void Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
MOVW ptr+0(FP), R1
diff --git a/src/runtime/internal/atomic/asm_ppc64x.s b/src/runtime/internal/atomic/asm_ppc64x.s
index a2ed4adc91..052b031cfb 100644
--- a/src/runtime/internal/atomic/asm_ppc64x.s
+++ b/src/runtime/internal/atomic/asm_ppc64x.s
@@ -59,6 +59,24 @@ cas64_fail:
MOVB R0, ret+24(FP)
RET
+TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
+ MOVD ptr+0(FP), R3
+ MOVWZ old+8(FP), R4
+ MOVWZ new+12(FP), R5
+ LWSYNC
+cas_again:
+ LWAR (R3), $0, R6 // 0 = Mutex release hint
+ CMPW R6, R4
+ BNE cas_fail
+ STWCCC R5, (R3)
+ BNE cas_again
+ MOVD $1, R3
+ MOVB R3, ret+16(FP)
+ RET
+cas_fail:
+ MOVB R0, ret+16(FP)
+ RET
+
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
BR runtime∕internal∕atomic·Cas64(SB)
@@ -159,6 +177,13 @@ TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVD R4, 0(R3)
RET
+TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
+ MOVD ptr+0(FP), R3
+ MOVW val+8(FP), R4
+ LWSYNC
+ MOVW R4, 0(R3)
+ RET
+
// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3
diff --git a/src/runtime/internal/atomic/asm_s390x.s b/src/runtime/internal/atomic/asm_s390x.s
index e25703e077..512fde5a12 100644
--- a/src/runtime/internal/atomic/asm_s390x.s
+++ b/src/runtime/internal/atomic/asm_s390x.s
@@ -48,6 +48,10 @@ cas64_fail:
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
BR ·Cas64(SB)
+// func CasRel(ptr *uint32, old, new uint32) bool
+TEXT ·CasRel(SB), NOSPLIT, $0-17
+ BR ·Cas(SB)
+
// func Loaduintptr(ptr *uintptr) uintptr
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
BR ·Load64(SB)
diff --git a/src/runtime/internal/atomic/atomic_386.go b/src/runtime/internal/atomic/atomic_386.go
index 4284d2bd7d..ad71ebd971 100644
--- a/src/runtime/internal/atomic/atomic_386.go
+++ b/src/runtime/internal/atomic/atomic_386.go
@@ -20,6 +20,12 @@ func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
return *(*unsafe.Pointer)(ptr)
}
+//go:nosplit
+//go:noinline
+func LoadAcq(ptr *uint32) uint32 {
+ return *ptr
+}
+
//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64
@@ -52,11 +58,17 @@ func Or8(ptr *uint8, val uint8)
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
//go:noescape
func Store(ptr *uint32, val uint32)
//go:noescape
func Store64(ptr *uint64, val uint64)
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
+
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/runtime/internal/atomic/atomic_amd64x.go b/src/runtime/internal/atomic/atomic_amd64x.go
index 54851d30f4..d4fe461609 100644
--- a/src/runtime/internal/atomic/atomic_amd64x.go
+++ b/src/runtime/internal/atomic/atomic_amd64x.go
@@ -26,6 +26,12 @@ func Load64(ptr *uint64) uint64 {
return *ptr
}
+//go:nosplit
+//go:noinline
+func LoadAcq(ptr *uint32) uint32 {
+ return *ptr
+}
+
//go:noescape
func Xadd(ptr *uint32, delta int32) uint32
@@ -55,12 +61,18 @@ func Or8(ptr *uint8, val uint8)
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
//go:noescape
func Store(ptr *uint32, val uint32)
//go:noescape
func Store64(ptr *uint64, val uint64)
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
+
// StorepNoWB performs *ptr = val atomically and without a write
// barrier.
//
diff --git a/src/runtime/internal/atomic/atomic_arm.go b/src/runtime/internal/atomic/atomic_arm.go
index 1ecdb11db9..51b42ba238 100644
--- a/src/runtime/internal/atomic/atomic_arm.go
+++ b/src/runtime/internal/atomic/atomic_arm.go
@@ -74,6 +74,9 @@ func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)
//go:noescape
func Store(addr *uint32, v uint32)
+//go:noescape
+func StoreRel(addr *uint32, v uint32)
+
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
if uintptr(unsafe.Pointer(addr))&7 != 0 {
@@ -181,9 +184,15 @@ func Load(addr *uint32) uint32
//go:noescape
func Loadp(addr unsafe.Pointer) unsafe.Pointer
+//go:noescape
+func LoadAcq(addr *uint32) uint32
+
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool
+//go:noescape
+func CasRel(addr *uint32, old, new uint32) bool
+
//go:noescape
func Xadd64(addr *uint64, delta int64) uint64
diff --git a/src/runtime/internal/atomic/atomic_arm64.go b/src/runtime/internal/atomic/atomic_arm64.go
index 3554b7f236..a2da27e7ed 100644
--- a/src/runtime/internal/atomic/atomic_arm64.go
+++ b/src/runtime/internal/atomic/atomic_arm64.go
@@ -35,6 +35,9 @@ func Load64(ptr *uint64) uint64
//go:noescape
func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+//go:noescape
+func LoadAcq(addr *uint32) uint32
+
//go:noescape
func Or8(ptr *uint8, val uint8)
@@ -44,6 +47,9 @@ func And8(ptr *uint8, val uint8)
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
//go:noescape
func Store(ptr *uint32, val uint32)
@@ -52,3 +58,6 @@ func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
diff --git a/src/runtime/internal/atomic/atomic_arm64.s b/src/runtime/internal/atomic/atomic_arm64.s
index 354fd1e94b..c979f2246f 100644
--- a/src/runtime/internal/atomic/atomic_arm64.s
+++ b/src/runtime/internal/atomic/atomic_arm64.s
@@ -25,9 +25,16 @@ TEXT ·Loadp(SB),NOSPLIT,$0-16
MOVD R0, ret+8(FP)
RET
+// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* addr)
+TEXT ·LoadAcq(SB),NOSPLIT,$0-12
+ B ·Load(SB)
+
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
B runtime∕internal∕atomic·Store64(SB)
+TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
+ B runtime∕internal∕atomic·Store(SB)
+
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
MOVD ptr+0(FP), R0
MOVW val+8(FP), R1
diff --git a/src/runtime/internal/atomic/atomic_mips64x.go b/src/runtime/internal/atomic/atomic_mips64x.go
index d06ea4809a..98a8fca929 100644
--- a/src/runtime/internal/atomic/atomic_mips64x.go
+++ b/src/runtime/internal/atomic/atomic_mips64x.go
@@ -35,6 +35,9 @@ func Load64(ptr *uint64) uint64
//go:noescape
func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+//go:noescape
+func LoadAcq(ptr *uint32) uint32
+
//go:noescape
func And8(ptr *uint8, val uint8)
@@ -46,6 +49,9 @@ func Or8(ptr *uint8, val uint8)
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
//go:noescape
func Store(ptr *uint32, val uint32)
@@ -54,3 +60,6 @@ func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
diff --git a/src/runtime/internal/atomic/atomic_mips64x.s b/src/runtime/internal/atomic/atomic_mips64x.s
index 087672f5cc..5214afe2d6 100644
--- a/src/runtime/internal/atomic/atomic_mips64x.s
+++ b/src/runtime/internal/atomic/atomic_mips64x.s
@@ -34,3 +34,7 @@ TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
SYNC
MOVV R1, ret+8(FP)
RET
+
+// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr)
+TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
+ JMP ·Load(SB)
diff --git a/src/runtime/internal/atomic/atomic_mipsx.go b/src/runtime/internal/atomic/atomic_mipsx.go
index 55943f6925..1cd6d9a9ce 100644
--- a/src/runtime/internal/atomic/atomic_mipsx.go
+++ b/src/runtime/internal/atomic/atomic_mipsx.go
@@ -119,6 +119,9 @@ func Load(ptr *uint32) uint32
//go:noescape
func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+//go:noescape
+func LoadAcq(ptr *uint32) uint32
+
//go:noescape
func And8(ptr *uint8, val uint8)
@@ -130,3 +133,9 @@ func Store(ptr *uint32, val uint32)
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
+
+//go:noescape
+func CasRel(addr *uint32, old, new uint32) bool
diff --git a/src/runtime/internal/atomic/atomic_ppc64x.go b/src/runtime/internal/atomic/atomic_ppc64x.go
index 72c98eb0c5..4f1a95c5bd 100644
--- a/src/runtime/internal/atomic/atomic_ppc64x.go
+++ b/src/runtime/internal/atomic/atomic_ppc64x.go
@@ -35,6 +35,9 @@ func Load64(ptr *uint64) uint64
//go:noescape
func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+//go:noescape
+func LoadAcq(ptr *uint32) uint32
+
//go:noescape
func And8(ptr *uint8, val uint8)
@@ -46,11 +49,17 @@ func Or8(ptr *uint8, val uint8)
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
//go:noescape
func Store(ptr *uint32, val uint32)
//go:noescape
func Store64(ptr *uint64, val uint64)
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
+
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/runtime/internal/atomic/atomic_ppc64x.s b/src/runtime/internal/atomic/atomic_ppc64x.s
index c9c2d1fc0c..c079ea494f 100644
--- a/src/runtime/internal/atomic/atomic_ppc64x.s
+++ b/src/runtime/internal/atomic/atomic_ppc64x.s
@@ -38,3 +38,12 @@ TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$-8-16
ISYNC
MOVD R3, ret+8(FP)
RET
+
+// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr)
+TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$-8-12
+ MOVD ptr+0(FP), R3
+ MOVWZ 0(R3), R3
+ CMPW R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7, 0x4
+ MOVW R3, ret+8(FP)
+ RET
diff --git a/src/runtime/internal/atomic/atomic_s390x.go b/src/runtime/internal/atomic/atomic_s390x.go
index 9343853485..ec294a27ba 100644
--- a/src/runtime/internal/atomic/atomic_s390x.go
+++ b/src/runtime/internal/atomic/atomic_s390x.go
@@ -24,6 +24,12 @@ func Load64(ptr *uint64) uint64 {
return *ptr
}
+//go:nosplit
+//go:noinline
+func LoadAcq(ptr *uint32) uint32 {
+ return *ptr
+}
+
//go:noinline
//go:nosplit
func Store(ptr *uint32, val uint32) {
@@ -43,6 +49,12 @@ func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) {
*(*uintptr)(ptr) = uintptr(val)
}
+//go:noinline
+//go:nosplit
+func StoreRel(ptr *uint32, val uint32) {
+ *ptr = val
+}
+
//go:noescape
func And8(ptr *uint8, val uint8)
@@ -71,3 +83,6 @@ func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
diff --git a/src/runtime/internal/atomic/atomic_wasm.go b/src/runtime/internal/atomic/atomic_wasm.go
index cbf254fcb5..71288e9003 100644
--- a/src/runtime/internal/atomic/atomic_wasm.go
+++ b/src/runtime/internal/atomic/atomic_wasm.go
@@ -21,6 +21,12 @@ func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
return *(*unsafe.Pointer)(ptr)
}
+//go:nosplit
+//go:noinline
+func LoadAcq(ptr *uint32) uint32 {
+ return *ptr
+}
+
//go:nosplit
//go:noinline
func Load64(ptr *uint64) uint64 {
@@ -105,6 +111,12 @@ func Store(ptr *uint32, val uint32) {
*ptr = val
}
+//go:nosplit
+//go:noinline
+func StoreRel(ptr *uint32, val uint32) {
+ *ptr = val
+}
+
//go:nosplit
//go:noinline
func Store64(ptr *uint64, val uint64) {
@@ -147,6 +159,16 @@ func Casuintptr(ptr *uintptr, old, new uintptr) bool {
return false
}
+//go:nosplit
+//go:noinline
+func CasRel(ptr *uint32, old, new uint32) bool {
+ if *ptr == old {
+ *ptr = new
+ return true
+ }
+ return false
+}
+
//go:nosplit
//go:noinline
func Storeuintptr(ptr *uintptr, new uintptr) {
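
LoadAcq, StoreRel, and CasRel give the runtime acquire/release orderings that
are cheaper than the fully-ordered defaults: on TSO targets (386, amd64,
s390x, wasm) they alias the plain operations, while ppc64 pays only an LWSYNC.
A user-level analogue of the publish/consume pattern they serve, written with
sync/atomic (whose operations are sequentially consistent, i.e. stronger than
release/acquire requires):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // A one-slot mailbox: the flag is written after the payload with
    // release semantics and read before it with acquire semantics, so a
    // reader that observes ready == 1 also observes the payload.
    type mailbox struct {
        payload int
        ready   uint32
    }

    func (m *mailbox) send(v int) {
        m.payload = v                   // plain store of the data...
        atomic.StoreUint32(&m.ready, 1) // ...published by the (release) store
    }

    func (m *mailbox) recv() (int, bool) {
        if atomic.LoadUint32(&m.ready) == 0 { // (acquire) load
            return 0, false
        }
        return m.payload, true // happens-after the publishing store
    }

    func main() {
        var m mailbox
        done := make(chan struct{})
        go func() { m.send(42); close(done) }()
        <-done
        v, ok := m.recv()
        fmt.Println(v, ok) // 42 true
    }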
diff --git a/src/runtime/internal/math/math.go b/src/runtime/internal/math/math.go
new file mode 100644
index 0000000000..5385f5dd86
--- /dev/null
+++ b/src/runtime/internal/math/math.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+import "runtime/internal/sys"
+
+const MaxUintptr = ^uintptr(0)
+
+// MulUintptr returns a * b and whether the multiplication overflowed.
+// On supported platforms this is an intrinsic lowered by the compiler.
+func MulUintptr(a, b uintptr) (uintptr, bool) {
+ if a|b < 1<<(4*sys.PtrSize) || a == 0 {
+ return a * b, false
+ }
+ overflow := b > MaxUintptr/a
+ return a * b, overflow
+}
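
The fast path in MulUintptr rests on a half-width argument: 4*sys.PtrSize is
half the pointer width in bits (PtrSize is in bytes), so a|b < 1<<(4*sys.PtrSize)
means both operands fit in the low half of a uintptr, and then
a*b < 1<<(8*sys.PtrSize) cannot overflow. The division in b > MaxUintptr/a is
paid only when some operand uses the top half.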
diff --git a/src/runtime/internal/math/math_test.go b/src/runtime/internal/math/math_test.go
new file mode 100644
index 0000000000..303eb63405
--- /dev/null
+++ b/src/runtime/internal/math/math_test.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math_test
+
+import (
+ . "runtime/internal/math"
+ "testing"
+)
+
+const (
+ UintptrSize = 32 << (^uintptr(0) >> 63)
+)
+
+type mulUintptrTest struct {
+ a uintptr
+ b uintptr
+ overflow bool
+}
+
+var mulUintptrTests = []mulUintptrTest{
+ {0, 0, false},
+ {1000, 1000, false},
+ {MaxUintptr, 0, false},
+ {MaxUintptr, 1, false},
+ {MaxUintptr / 2, 2, false},
+ {MaxUintptr / 2, 3, true},
+ {MaxUintptr, 10, true},
+ {MaxUintptr, 100, true},
+ {MaxUintptr / 100, 100, false},
+ {MaxUintptr / 1000, 1001, true},
+ {1<<(UintptrSize/2) - 1, 1<<(UintptrSize/2) - 1, false},
+ {1 << (UintptrSize / 2), 1 << (UintptrSize / 2), true},
+ {MaxUintptr >> 32, MaxUintptr >> 32, false},
+ {MaxUintptr, MaxUintptr, true},
+}
+
+func TestMulUintptr(t *testing.T) {
+ for _, test := range mulUintptrTests {
+ a, b := test.a, test.b
+ for i := 0; i < 2; i++ {
+ mul, overflow := MulUintptr(a, b)
+ if mul != a*b || overflow != test.overflow {
+ t.Errorf("MulUintptr(%v, %v) = %v, %v want %v, %v",
+ a, b, mul, overflow, a*b, test.overflow)
+ }
+ a, b = b, a
+ }
+ }
+}
+
+var SinkUintptr uintptr
+var SinkBool bool
+
+var x, y uintptr
+
+func BenchmarkMulUintptr(b *testing.B) {
+ x, y = 1, 2
+ b.Run("small", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var overflow bool
+ SinkUintptr, overflow = MulUintptr(x, y)
+ if overflow {
+ SinkUintptr = 0
+ }
+ }
+ })
+ x, y = MaxUintptr, MaxUintptr-1
+ b.Run("large", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var overflow bool
+ SinkUintptr, overflow = MulUintptr(x, y)
+ if overflow {
+ SinkUintptr = 0
+ }
+ }
+ })
+}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c3fe1169dc..12fa744052 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -106,6 +106,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/math"
"runtime/internal/sys"
"unsafe"
)
@@ -136,8 +137,7 @@ const (
_TinySize = 16
_TinySizeClass = int8(2)
- _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
- _MaxMHeapList = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
+ _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
// Per-P, per order stack segment cache size.
_StackCacheSize = 32 * 1024
@@ -216,16 +216,16 @@ const (
// The number of bits in a heap address, the size of heap
// arenas, and the L1 and L2 arena map sizes are related by
//
- // (1 << addrBits) = arenaBytes * L1entries * L2entries
+ // (1 << addr bits) = arena size * L1 entries * L2 entries
//
// Currently, we balance these as follows:
//
- // Platform Addr bits Arena size L1 entries L2 size
- // -------------- --------- ---------- ---------- -------
- // */64-bit 48 64MB 1 32MB
- // windows/64-bit 48 4MB 64 8MB
- // */32-bit 32 4MB 1 4KB
- // */mips(le) 31 4MB 1 2KB
+ // Platform Addr bits Arena size L1 entries L2 entries
+ // -------------- --------- ---------- ---------- -----------
+ // */64-bit 48 64MB 1 4M (32MB)
+ // windows/64-bit 48 4MB 64 1M (8MB)
+ // */32-bit 32 4MB 1 1024 (4KB)
+ // */mips(le) 31 4MB 1 512 (2KB)
// heapArenaBytes is the size of a heap arena. The heap
// consists of mappings of size heapArenaBytes, aligned to
@@ -1041,10 +1041,11 @@ func newarray(typ *_type, n int) unsafe.Pointer {
if n == 1 {
return mallocgc(typ.size, typ, true)
}
- if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
+ mem, overflow := math.MulUintptr(typ.size, uintptr(n))
+ if overflow || mem > maxAlloc || n < 0 {
panic(plainError("runtime: allocation size out of range"))
}
- return mallocgc(typ.size*uintptr(n), typ, true)
+ return mallocgc(mem, typ, true)
}
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
diff --git a/src/runtime/map.go b/src/runtime/map.go
index c3fcfbfdbe..3e368f929f 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -55,6 +55,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/math"
"runtime/internal/sys"
"unsafe"
)
@@ -296,7 +297,8 @@ func makemap_small() *hmap {
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
- if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
+ mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
+ if overflow || mem > maxAlloc {
hint = 0
}
@@ -306,7 +308,8 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {
}
h.hash0 = fastrand()
- // find size parameter which will hold the requested # of elements
+ // Find the size parameter B which will hold the requested # of elements.
+ // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
B := uint8(0)
for overLoadFactor(hint, B) {
B++
diff --git a/src/runtime/map_benchmark_test.go b/src/runtime/map_benchmark_test.go
index 025c0398d3..5681d5eeb8 100644
--- a/src/runtime/map_benchmark_test.go
+++ b/src/runtime/map_benchmark_test.go
@@ -228,6 +228,23 @@ func benchmarkRepeatedLookup(b *testing.B, lookupKeySize int) {
func BenchmarkRepeatedLookupStrMapKey32(b *testing.B) { benchmarkRepeatedLookup(b, 32) }
func BenchmarkRepeatedLookupStrMapKey1M(b *testing.B) { benchmarkRepeatedLookup(b, 1<<20) }
+func BenchmarkMakeMap(b *testing.B) {
+ b.Run("[Byte]Byte", func(b *testing.B) {
+ var m map[byte]byte
+ for i := 0; i < b.N; i++ {
+ m = make(map[byte]byte, 10)
+ }
+ hugeSink = m
+ })
+ b.Run("[Int]Int", func(b *testing.B) {
+ var m map[int]int
+ for i := 0; i < b.N; i++ {
+ m = make(map[int]int, 10)
+ }
+ hugeSink = m
+ })
+}
+
func BenchmarkNewEmptyMap(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
@@ -370,3 +387,37 @@ func BenchmarkGoMapClear(b *testing.B) {
}
})
}
+
+func BenchmarkMapStringConversion(b *testing.B) {
+ for _, length := range []int{32, 64} {
+ b.Run(strconv.Itoa(length), func(b *testing.B) {
+ bytes := make([]byte, length)
+ b.Run("simple", func(b *testing.B) {
+ b.ReportAllocs()
+ m := make(map[string]int)
+ m[string(bytes)] = 0
+ for i := 0; i < b.N; i++ {
+ _ = m[string(bytes)]
+ }
+ })
+ b.Run("struct", func(b *testing.B) {
+ b.ReportAllocs()
+ type stringstruct struct{ s string }
+ m := make(map[stringstruct]int)
+ m[stringstruct{string(bytes)}] = 0
+ for i := 0; i < b.N; i++ {
+ _ = m[stringstruct{string(bytes)}]
+ }
+ })
+ b.Run("array", func(b *testing.B) {
+ b.ReportAllocs()
+ type stringarray [1]string
+ m := make(map[stringarray]int)
+ m[stringarray{string(bytes)}] = 0
+ for i := 0; i < b.N; i++ {
+ _ = m[stringarray{string(bytes)}]
+ }
+ })
+ })
+ }
+}
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 87fa027b4e..4854c0e632 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -283,9 +283,7 @@ func (m markBits) isMarked() bool {
return *m.bytep&m.mask != 0
}
-// setMarked sets the marked bit in the markbits, atomically. Some compilers
-// are not able to inline atomic.Or8 function so if it appears as a hot spot consider
-// inlining it manually.
+// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
// Might be racing with other updates, so use atomic update always.
// We used to be clever here and use a non-atomic update in certain
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index 9ca8e5d222..d9bc8b4719 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -117,8 +117,7 @@ havespan:
if trace.enabled && !traceDone {
traceGCSweepDone()
}
- cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.allocCount)
+ n := int(s.nelems) - int(s.allocCount)
if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
throw("span has no free objects")
}
@@ -153,16 +152,6 @@ func (c *mcentral) uncacheSpan(s *mspan) {
throw("uncaching span but s.allocCount == 0")
}
- cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.allocCount)
-
- // cacheSpan updated alloc assuming all objects on s were
- // going to be allocated. Adjust for any that weren't. We must
- // do this before potentially sweeping the span.
- if n > 0 {
- atomic.Xadd64(&c.nmalloc, -int64(n))
- }
-
sg := mheap_.sweepgen
stale := s.sweepgen == sg+1
if stale {
@@ -170,18 +159,22 @@ func (c *mcentral) uncacheSpan(s *mspan) {
// responsibility to sweep it.
//
// Set sweepgen to indicate it's not cached but needs
- // sweeping. sweep will set s.sweepgen to indicate s
- // is swept.
- s.sweepgen = sg - 1
- s.sweep(true)
- // sweep may have freed objects, so recompute n.
- n = cap - int32(s.allocCount)
+ // sweeping and can't be allocated from. sweep will
+ // set s.sweepgen to indicate s is swept.
+ atomic.Store(&s.sweepgen, sg-1)
} else {
// Indicate that s is no longer cached.
- s.sweepgen = sg
+ atomic.Store(&s.sweepgen, sg)
}
+ n := int(s.nelems) - int(s.allocCount)
if n > 0 {
+ // cacheSpan updated alloc assuming all objects on s
+ // were going to be allocated. Adjust for any that
+ // weren't. We must do this before potentially
+ // sweeping the span.
+ atomic.Xadd64(&c.nmalloc, -int64(n))
+
lock(&c.lock)
c.empty.remove(s)
c.nonempty.insert(s)
@@ -197,6 +190,12 @@ func (c *mcentral) uncacheSpan(s *mspan) {
}
unlock(&c.lock)
}
+
+ if stale {
+ // Now that s is in the right mcentral list, we can
+ // sweep it.
+ s.sweep(false)
+ }
}
// freeSpan updates c and s after sweeping s.
@@ -244,7 +243,7 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
c.nonempty.remove(s)
unlock(&c.lock)
- mheap_.freeSpan(s, 0)
+ mheap_.freeSpan(s, false)
return true
}
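
For reference, a sketch of the sweepgen states the atomic stores in uncacheSpan rely on, relative to h = mheap_.sweepgen (adapted from the runtime's mspan comment; the h+1 and h+3 cases are the ones this change introduces):

// Deltas from mheap_.sweepgen (h); names are illustrative, not runtime API.
const (
	sweepgenNeedsSweep    = -2 // s.sweepgen == h-2: span needs sweeping
	sweepgenBeingSwept    = -1 // s.sweepgen == h-1: span is being swept
	sweepgenSwept         = 0  // s.sweepgen == h:   span is swept, ready to use
	sweepgenCachedUnswept = +1 // s.sweepgen == h+1: span was cached before sweep began, needs sweeping
	sweepgenCachedSwept   = +3 // s.sweepgen == h+3: span was swept and then cached
)
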
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 9dfee5a4dc..e12df7f7d2 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -789,7 +789,7 @@ func gcSetTriggerRatio(triggerRatio float64) {
trigger = uint64(float64(memstats.heap_marked) * (1 + triggerRatio))
// Don't trigger below the minimum heap size.
minTrigger := heapminimum
- if !gosweepdone() {
+ if !isSweepDone() {
// Concurrent sweep happens in the heap growth
// from heap_live to gc_trigger, so ensure
// that concurrent sweep has some heap growth
@@ -834,7 +834,7 @@ func gcSetTriggerRatio(triggerRatio float64) {
}
// Update sweep pacing.
- if gosweepdone() {
+ if isSweepDone() {
mheap_.sweepPagesPerByte = 0
} else {
// Concurrent sweep needs to sweep all of the in-use
@@ -884,7 +884,7 @@ const gcGoalUtilization = 0.30
// mutator latency.
const gcBackgroundUtilization = 0.25
-// gcCreditSlack is the amount of scan work credit that can can
+// gcCreditSlack is the amount of scan work credit that can
// accumulate locally before updating gcController.scanWork and,
// optionally, gcController.bgScanCredit. Lower values give a more
// accurate assist ratio and make it more likely that assists will
@@ -1061,7 +1061,7 @@ func GC() {
// complete the cycle and because runtime.GC() is often used
// as part of tests and benchmarks to get the system into a
// relatively stable and isolated state.
- for atomic.Load(&work.cycles) == n+1 && gosweepone() != ^uintptr(0) {
+ for atomic.Load(&work.cycles) == n+1 && sweepone() != ^uintptr(0) {
sweep.nbgsweep++
Gosched()
}
@@ -1219,7 +1219,7 @@ func gcStart(trigger gcTrigger) {
//
// We check the transition condition continuously here in case
// this G gets delayed in to the next GC cycle.
- for trigger.test() && gosweepone() != ^uintptr(0) {
+ for trigger.test() && sweepone() != ^uintptr(0) {
sweep.nbgsweep++
}
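
A worked sketch of the trigger computation in the gcSetTriggerRatio hunk above, with assumed numbers rather than real runtime state:

package main

import "fmt"

func main() {
	heapMarked := uint64(100 << 20) // 100 MiB found live by the last GC
	triggerRatio := 0.45            // pacer's current estimate
	heapMinimum := uint64(4 << 20)  // don't trigger below the minimum heap

	trigger := uint64(float64(heapMarked) * (1 + triggerRatio))
	if trigger < heapMinimum {
		trigger = heapMinimum
	}
	fmt.Printf("next GC when heap_live reaches ~%d MiB\n", trigger>>20) // 145
}
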
diff --git a/src/runtime/mgclarge.go b/src/runtime/mgclarge.go
index e7fa831937..11a977d6ba 100644
--- a/src/runtime/mgclarge.go
+++ b/src/runtime/mgclarge.go
@@ -46,15 +46,6 @@ type treapNode struct {
priority uint32 // random number used by treap algorithm to keep tree probabilistically balanced
}
-func (t *treapNode) init() {
- t.right = nil
- t.left = nil
- t.parent = nil
- t.spanKey = nil
- t.npagesKey = 0
- t.priority = 0
-}
-
// isSpanInTreap is handy for debugging. One should hold the heap lock, usually
// mheap_.lock().
func (t *treapNode) isSpanInTreap(s *mspan) bool {
@@ -140,7 +131,6 @@ func (root *mTreap) insert(span *mspan) {
// https://faculty.washington.edu/aragon/pubs/rst89.pdf
t := (*treapNode)(mheap_.treapalloc.alloc())
- t.init()
t.npagesKey = span.npages
t.priority = fastrand()
t.spanKey = span
@@ -188,8 +178,6 @@ func (root *mTreap) removeNode(t *treapNode) {
root.treap = nil
}
// Return the found treapNode's span after freeing the treapNode.
- t.spanKey = nil
- t.npagesKey = 0
mheap_.treapalloc.free(unsafe.Pointer(t))
}
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index d4dcfb6cb9..14f09700ee 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1228,8 +1228,7 @@ func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintp
if mbits.isMarked() {
return
}
- // mbits.setMarked() // Avoid extra call overhead with manual inlining.
- atomic.Or8(mbits.bytep, mbits.mask)
+ mbits.setMarked()
// If this is a noscan object, fast-track it to black
// instead of greying it.
if span.spanclass.noscan() {
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 00950aede2..627a6a023f 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -52,7 +52,7 @@ func bgsweep(c chan int) {
goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
for {
- for gosweepone() != ^uintptr(0) {
+ for sweepone() != ^uintptr(0) {
sweep.nbgsweep++
Gosched()
}
@@ -60,7 +60,7 @@ func bgsweep(c chan int) {
Gosched()
}
lock(&sweep.lock)
- if !gosweepdone() {
+ if !isSweepDone() {
// This can happen if a GC runs between
// gosweepone returning ^0 above
// and the lock being acquired.
@@ -72,9 +72,8 @@ func bgsweep(c chan int) {
}
}
-// sweeps one span
-// returns number of pages returned to heap, or ^uintptr(0) if there is nothing to sweep
-//go:nowritebarrier
+// sweepone sweeps one span and returns the number of pages returned
+// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
_g_ := getg()
sweepRatio := mheap_.sweepPagesPerByte // For debugging
@@ -101,7 +100,7 @@ func sweepone() uintptr {
// This can happen if direct sweeping already
// swept this span, but in that case the sweep
// generation should always be up-to-date.
- if s.sweepgen != sg {
+ if !(s.sweepgen == sg || s.sweepgen == sg+3) {
print("runtime: bad span s.state=", s.state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
throw("non in-use span in unswept list")
}
@@ -135,17 +134,13 @@ func sweepone() uintptr {
return npages
}
-//go:nowritebarrier
-func gosweepone() uintptr {
- var ret uintptr
- systemstack(func() {
- ret = sweepone()
- })
- return ret
-}
-
-//go:nowritebarrier
-func gosweepdone() bool {
+// isSweepDone reports whether all spans are swept or currently being swept.
+//
+// Note that this condition may transition from false to true at any
+// time as the sweeper runs. It may transition from true to false if a
+// GC runs; to prevent that the caller must be non-preemptible or must
+// somehow block GC progress.
+func isSweepDone() bool {
return mheap_.sweepdone != 0
}
@@ -366,7 +361,7 @@ func (s *mspan) sweep(preserve bool) bool {
s.limit = 0 // prevent mlookup from finding this span
sysFault(unsafe.Pointer(s.base()), size)
} else {
- mheap_.freeSpan(s, 1)
+ mheap_.freeSpan(s, true)
}
c.local_nlargefree++
c.local_largefree += size
@@ -414,7 +409,7 @@ retry:
newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
- if gosweepone() == ^uintptr(0) {
+ if sweepone() == ^uintptr(0) {
mheap_.sweepPagesPerByte = 0
break
}
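
A hedged sketch of the proportional sweep pacing above: whoever allocates spanBytes must first sweep enough pages to stay on the pacing line. All numbers below are assumptions for illustration:

package main

import "fmt"

func main() {
	sweepPagesPerByte := 0.001             // pacing slope set by gcSetTriggerRatio
	heapLive := uint64(64 << 20)           // current heap_live
	sweepHeapLiveBasis := uint64(60 << 20) // heap_live when the slope was set
	spanBytes := uintptr(8 << 10)          // bytes being allocated now

	newHeapLive := uintptr(heapLive-sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(sweepPagesPerByte * float64(newHeapLive))
	fmt.Println("pages that must be swept by now:", pagesTarget) // 4202
}
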
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 7a11bdc058..33a190a4c5 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -30,13 +30,11 @@ const minPhysPageSize = 4096
//go:notinheap
type mheap struct {
lock mutex
- free [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList
- freelarge mTreap // free treap of length >= _MaxMHeapList
- busy [_MaxMHeapList]mSpanList // busy lists of large spans of given length
- busylarge mSpanList // busy lists of large spans length >= _MaxMHeapList
- sweepgen uint32 // sweep generation, see comment in mspan
- sweepdone uint32 // all spans are swept
- sweepers uint32 // number of active sweepone calls
+ free mTreap // free treap of spans
+ busy mSpanList // busy list of spans
+ sweepgen uint32 // sweep generation, see comment in mspan
+ sweepdone uint32 // all spans are swept
+ sweepers uint32 // number of active sweepone calls
// allspans is a slice of all mspans ever created. Each mspan
// appears exactly once.
@@ -351,6 +349,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
return
}
+func (s *mspan) scavenge() uintptr {
+ start := s.base()
+ end := start + s.npages<<_PageShift
+ if physPageSize > _PageSize {
+ // We can only release pages in
+ // physPageSize blocks, so round start
+ // and end in. (Otherwise, madvise
+ // will round them *out* and release
+ // more memory than we want.)
+ start = (start + physPageSize - 1) &^ (physPageSize - 1)
+ end &^= physPageSize - 1
+ if end <= start {
+ // start and end don't span a
+ // whole physical page.
+ return 0
+ }
+ }
+ len := end - start
+ released := len - (s.npreleased << _PageShift)
+ if physPageSize > _PageSize && released == 0 {
+ return 0
+ }
+ memstats.heap_released += uint64(released)
+ s.npreleased = len >> _PageShift
+ sysUnused(unsafe.Pointer(start), len)
+ return released
+}
+
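
A worked sketch of the physical-page rounding in scavenge above: start is rounded up and end rounded down so madvise never releases memory outside the span. Addresses and page sizes are assumptions for illustration:

package main

import "fmt"

func main() {
	const physPageSize = uintptr(16 << 10) // 16 KiB physical pages
	start := uintptr(0x12000)              // span start, not phys-page aligned
	end := uintptr(0x1A000)                // span end (a 32 KiB span)

	start = (start + physPageSize - 1) &^ (physPageSize - 1) // round up -> 0x14000
	end &^= physPageSize - 1                                 // round down -> 0x18000
	if end <= start {
		fmt.Println("span does not cover a whole physical page; release nothing")
		return
	}
	fmt.Printf("release [%#x, %#x): %d bytes\n", start, end, end-start) // 16384 bytes
}
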
// recordspan adds a newly allocated span to h.allspans.
//
// This only happens the first time a span is allocated from
@@ -571,12 +597,7 @@ func (h *mheap) init() {
h.spanalloc.zero = false
// h->mapcache needs no init
- for i := range h.free {
- h.free[i].init()
- h.busy[i].init()
- }
-
- h.busylarge.init()
+ h.busy.init()
for i := range h.central {
h.central[i].mcentral.init(spanClass(i))
}
@@ -619,30 +640,12 @@ retry:
// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
- // First try to sweep busy spans with large objects of size >= npage,
- // this has good chances of reclaiming the necessary space.
- for i := int(npage); i < len(h.busy); i++ {
- if h.reclaimList(&h.busy[i], npage) != 0 {
- return // Bingo!
- }
- }
-
- // Then -- even larger objects.
- if h.reclaimList(&h.busylarge, npage) != 0 {
+ if h.reclaimList(&h.busy, npage) != 0 {
return // Bingo!
}
- // Now try smaller objects.
- // One such object is not enough, so we need to reclaim several of them.
- reclaimed := uintptr(0)
- for i := 0; i < int(npage) && i < len(h.busy); i++ {
- reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
- if reclaimed >= npage {
- return
- }
- }
-
// Now sweep everything that is not yet swept.
+ var reclaimed uintptr
unlock(&h.lock)
for {
n := sweepone()
@@ -657,13 +660,14 @@ func (h *mheap) reclaim(npage uintptr) {
lock(&h.lock)
}
-// Allocate a new span of npage pages from the heap for GC'd memory
-// and record its size class in the HeapMap and HeapMapCache.
+// alloc_m is the internal implementation of mheap.alloc.
+//
+// alloc_m must run on the system stack because it locks the heap, so
+// any stack growth during alloc_m would self-deadlock.
+//
+//go:systemstack
func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
_g_ := getg()
- if _g_ != _g_.m.g0 {
- throw("_mheap_alloc not on g0 stack")
- }
lock(&h.lock)
// To prevent excessive heap growth, before allocating n pages
@@ -672,7 +676,7 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
// TODO(austin): This tends to sweep a large number of
// spans in order to find a few completely free spans
// (for example, in the garbage benchmark, this sweeps
- // ~30x the number of pages its trying to allocate).
+ // ~30x the number of pages it's trying to allocate).
// If GC kept a bit for whether there were any marks
// in a span, we could release these free spans
// at the end of GC and eliminate this entirely.
@@ -723,11 +727,7 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
mheap_.nlargealloc++
atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
// Swept spans are at the end of lists.
- if s.npages < uintptr(len(h.busy)) {
- h.busy[s.npages].insertBack(s)
- } else {
- h.busylarge.insertBack(s)
- }
+ h.busy.insertBack(s)
}
}
// heap_scan and heap_live were updated.
@@ -752,6 +752,12 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
return s
}
+// alloc allocates a new span of npage pages from the GC'd heap.
+//
+// Either large must be true or spanclass must indicate the span's
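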
+// size class and scannability.
+//
+// If needzero is true, the memory for the returned span will be zeroed.
func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
// Don't do any operations that lock the heap on the G stack.
// It might trigger stack growth, and the stack growth code needs
@@ -832,31 +838,20 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
// The returned span has been removed from the
// free list, but its state is still mSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
- var list *mSpanList
var s *mspan
- // Try in fixed-size lists up to max.
- for i := int(npage); i < len(h.free); i++ {
- list = &h.free[i]
- if !list.isEmpty() {
- s = list.first
- list.remove(s)
- goto HaveSpan
- }
- }
- // Best fit in list of large spans.
- s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
+ // Best fit in the treap of spans.
+ s = h.free.remove(npage)
if s == nil {
if !h.grow(npage) {
return nil
}
- s = h.allocLarge(npage)
+ s = h.free.remove(npage)
if s == nil {
return nil
}
}
-HaveSpan:
// Mark span in use.
if s.state != mSpanFree {
throw("MHeap_AllocLocked - MSpan not free")
@@ -898,21 +893,6 @@ HaveSpan:
return s
}
-// Large spans have a minimum size of 1MByte. The maximum number of large spans to support
-// 1TBytes is 1 million, experimentation using random sizes indicates that the depth of
-// the tree is less that 2x that of a perfectly balanced tree. For 1TByte can be referenced
-// by a perfectly balanced tree with a depth of 20. Twice that is an acceptable 40.
-func (h *mheap) isLargeSpan(npages uintptr) bool {
- return npages >= uintptr(len(h.free))
-}
-
-// allocLarge allocates a span of at least npage pages from the treap of large spans.
-// Returns nil if no such span currently exists.
-func (h *mheap) allocLarge(npage uintptr) *mspan {
- // Search treap for smallest span with >= npage pages.
- return h.freelarge.remove(npage)
-}
-
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
@@ -938,7 +918,10 @@ func (h *mheap) grow(npage uintptr) bool {
}
// Free the span back into the heap.
-func (h *mheap) freeSpan(s *mspan, acct int32) {
+//
+// large must match the value of large passed to mheap.alloc. This is
+// used for accounting.
+func (h *mheap) freeSpan(s *mspan, large bool) {
systemstack(func() {
mp := getg().m
lock(&h.lock)
@@ -952,7 +935,8 @@ func (h *mheap) freeSpan(s *mspan, acct int32) {
bytes := s.npages << _PageShift
msanfree(base, bytes)
}
- if acct != 0 {
+ if large {
+ // Match accounting done in mheap.alloc.
memstats.heap_objects--
}
if gcBlackenEnabled != 0 {
@@ -984,7 +968,7 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
unlock(&h.lock)
}
-// s must be on a busy list (h.busy or h.busylarge) or unlinked.
+// s must be on the busy list or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
switch s.state {
case mSpanManual:
@@ -1009,7 +993,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
}
s.state = mSpanFree
if s.inList() {
- h.busyList(s.npages).remove(s)
+ h.busy.remove(s)
}
// Stamp newly unused spans. The scavenger will use that
@@ -1030,12 +1014,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
h.setSpan(before.base(), s)
// The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node.
- if h.isLargeSpan(before.npages) {
- // We have a t, it is large so it has to be in the treap so we can remove it.
- h.freelarge.removeSpan(before)
- } else {
- h.freeList(before.npages).remove(before)
- }
+ h.free.removeSpan(before)
before.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(before))
}
@@ -1046,105 +1025,23 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
s.npreleased += after.npreleased
s.needzero |= after.needzero
h.setSpan(s.base()+s.npages*pageSize-1, s)
- if h.isLargeSpan(after.npages) {
- h.freelarge.removeSpan(after)
- } else {
- h.freeList(after.npages).remove(after)
- }
+ h.free.removeSpan(after)
after.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(after))
}
- // Insert s into appropriate list or treap.
- if h.isLargeSpan(s.npages) {
- h.freelarge.insert(s)
- } else {
- h.freeList(s.npages).insert(s)
- }
-}
-
-func (h *mheap) freeList(npages uintptr) *mSpanList {
- return &h.free[npages]
-}
-
-func (h *mheap) busyList(npages uintptr) *mSpanList {
- if npages < uintptr(len(h.busy)) {
- return &h.busy[npages]
- }
- return &h.busylarge
+ // Insert s into the free treap.
+ h.free.insert(s)
}
func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
s := t.spanKey
- var sumreleased uintptr
if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
- start := s.base()
- end := start + s.npages<<_PageShift
- if physPageSize > _PageSize {
- // We can only release pages in
- // physPageSize blocks, so round start
- // and end in. (Otherwise, madvise
- // will round them *out* and release
- // more memory than we want.)
- start = (start + physPageSize - 1) &^ (physPageSize - 1)
- end &^= physPageSize - 1
- if end <= start {
- // start and end don't span a
- // whole physical page.
- return sumreleased
- }
+ if released := s.scavenge(); released != 0 {
+ return released
}
- len := end - start
- released := len - (s.npreleased << _PageShift)
- if physPageSize > _PageSize && released == 0 {
- return sumreleased
- }
- memstats.heap_released += uint64(released)
- sumreleased += released
- s.npreleased = len >> _PageShift
- sysUnused(unsafe.Pointer(start), len)
}
- return sumreleased
-}
-
-func scavengelist(list *mSpanList, now, limit uint64) uintptr {
- if list.isEmpty() {
- return 0
- }
-
- var sumreleased uintptr
- for s := list.first; s != nil; s = s.next {
- if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
- continue
- }
- start := s.base()
- end := start + s.npages<<_PageShift
- if physPageSize > _PageSize {
- // We can only release pages in
- // physPageSize blocks, so round start
- // and end in. (Otherwise, madvise
- // will round them *out* and release
- // more memory than we want.)
- start = (start + physPageSize - 1) &^ (physPageSize - 1)
- end &^= physPageSize - 1
- if end <= start {
- // start and end don't span a
- // whole physical page.
- continue
- }
- }
- len := end - start
-
- released := len - (s.npreleased << _PageShift)
- if physPageSize > _PageSize && released == 0 {
- continue
- }
- memstats.heap_released += uint64(released)
- sumreleased += released
- s.npreleased = len >> _PageShift
- sysUnused(unsafe.Pointer(start), len)
- }
- return sumreleased
+ return 0
}
func (h *mheap) scavenge(k int32, now, limit uint64) {
@@ -1154,11 +1051,7 @@ func (h *mheap) scavenge(k int32, now, limit uint64) {
gp := getg()
gp.m.mallocing++
lock(&h.lock)
- var sumreleased uintptr
- for i := 0; i < len(h.free); i++ {
- sumreleased += scavengelist(&h.free[i], now, limit)
- }
- sumreleased += scavengetreap(h.freelarge.treap, now, limit)
+ sumreleased := scavengetreap(h.free.treap, now, limit)
unlock(&h.lock)
gp.m.mallocing--
diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go
index 8f082ba6a0..207b0e4d4d 100644
--- a/src/runtime/os_linux_arm.go
+++ b/src/runtime/os_linux_arm.go
@@ -4,20 +4,14 @@
package runtime
-import (
- "internal/cpu"
- "unsafe"
-)
+import "internal/cpu"
const (
- _AT_PLATFORM = 15 // introduced in at least 2.6.11
-
_HWCAP_VFP = 1 << 6 // introduced in at least 2.6.11
_HWCAP_VFPv3 = 1 << 13 // introduced in 2.6.30
)
var randomNumber uint32
-var armArch uint8 = 6 // we default to ARMv6
func checkgoarm() {
// On Android, /proc/self/auxv might be unreadable and hwcap won't
@@ -47,12 +41,6 @@ func archauxv(tag, val uintptr) {
randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
- case _AT_PLATFORM: // v5l, v6l, v7l
- t := *(*uint8)(unsafe.Pointer(val + 1))
- if '5' <= t && t <= '7' {
- armArch = t - '0'
- }
-
case _AT_HWCAP:
cpu.HWCap = uint(val)
case _AT_HWCAP2:
diff --git a/src/runtime/pprof/internal/profile/profile.go b/src/runtime/pprof/internal/profile/profile.go
index 863bd403a4..84e607e9a8 100644
--- a/src/runtime/pprof/internal/profile/profile.go
+++ b/src/runtime/pprof/internal/profile/profile.go
@@ -415,16 +415,16 @@ func (p *Profile) String() string {
for _, m := range p.Mapping {
bits := ""
if m.HasFunctions {
- bits = bits + "[FN]"
+ bits += "[FN]"
}
if m.HasFilenames {
- bits = bits + "[FL]"
+ bits += "[FL]"
}
if m.HasLineNumbers {
- bits = bits + "[LN]"
+ bits += "[LN]"
}
if m.HasInlineFrames {
- bits = bits + "[IN]"
+ bits += "[IN]"
}
ss = append(ss, fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
m.ID,
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index acfdc8472e..844e023715 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -478,12 +478,12 @@ const (
)
// cpuinit extracts the environment variable GODEBUGCPU from the environment on
-// Linux and Darwin if the GOEXPERIMENT debugcpu was set and calls internal/cpu.Initialize.
+// Linux and Darwin and calls internal/cpu.Initialize.
func cpuinit() {
const prefix = "GODEBUGCPU="
var env string
- if haveexperiment("debugcpu") && (GOOS == "linux" || GOOS == "darwin") {
+ if GOOS == "linux" || GOOS == "darwin" {
cpu.DebugOptions = true
// Similar to goenv_unix but extracts the environment value for
@@ -1878,7 +1878,7 @@ func startTemplateThread() {
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
-// may not be a a good state.
+// may not be in a good state.
//
// Many programs never need this, so templateThread is started lazily
// when we first enter a state that might lead to running on a thread
@@ -4765,11 +4765,11 @@ func runqput(_p_ *p, gp *g, next bool) {
}
retry:
- h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
t := _p_.runqtail
if t-h < uint32(len(_p_.runq)) {
_p_.runq[t%uint32(len(_p_.runq))].set(gp)
- atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
return
}
if runqputslow(_p_, gp, h, t) {
@@ -4793,7 +4793,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
for i := uint32(0); i < n; i++ {
batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
}
- if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
return false
}
batch[n] = gp
@@ -4837,13 +4837,13 @@ func runqget(_p_ *p) (gp *g, inheritTime bool) {
}
for {
- h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
t := _p_.runqtail
if t == h {
return nil, false
}
gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
- if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
return gp, false
}
}
@@ -4855,8 +4855,8 @@ func runqget(_p_ *p) (gp *g, inheritTime bool) {
// Can be executed by any P.
func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
for {
- h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
n := t - h
n = n - n/2
if n == 0 {
@@ -4899,7 +4899,7 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
batch[(batchHead+i)%uint32(len(batch))] = g
}
- if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
return n
}
}
@@ -4919,11 +4919,11 @@ func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
if n == 0 {
return gp
}
- h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
if t-h+n >= uint32(len(_p_.runq)) {
throw("runqsteal: runq overflow")
}
- atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
return gp
}
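
A hedged sketch of the runq pattern these LoadAcq/StoreRel/CasRel calls implement: a fixed-size ring with a single producer (the owning P) and stealing consumers. This uses sync/atomic, which on Go is sequentially consistent and therefore stronger than the acquire/release ordering the runtime needs; it is an illustration, not the runtime's code:

package main

import (
	"fmt"
	"sync/atomic"
)

type ring struct {
	head, tail uint32 // head: advanced by any consumer; tail: written by owner only
	buf        [8]int
}

func (r *ring) put(v int) bool {
	h := atomic.LoadUint32(&r.head) // load-acquire, synchronize with consumers
	t := r.tail
	if t-h >= uint32(len(r.buf)) {
		return false // full
	}
	r.buf[t%uint32(len(r.buf))] = v
	atomic.StoreUint32(&r.tail, t+1) // store-release, publish the item
	return true
}

func (r *ring) get() (int, bool) {
	for {
		h := atomic.LoadUint32(&r.head) // load-acquire, synchronize with other consumers
		t := atomic.LoadUint32(&r.tail) // load-acquire, synchronize with the producer
		if t == h {
			return 0, false // empty
		}
		v := r.buf[h%uint32(len(r.buf))]
		if atomic.CompareAndSwapUint32(&r.head, h, h+1) { // cas-release, commit consume
			return v, true
		}
	}
}

func main() {
	var r ring
	r.put(42)
	fmt.Println(r.get()) // 42 true
}
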
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 4206f4384a..9a081043b0 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "runtime/internal/math"
"runtime/internal/sys"
"unsafe"
)
@@ -22,28 +23,6 @@ type notInHeapSlice struct {
cap int
}
-// maxElems is a lookup table containing the maximum capacity for a slice.
-// The index is the size of the slice element.
-var maxElems = [...]uintptr{
- ^uintptr(0),
- maxAlloc / 1, maxAlloc / 2, maxAlloc / 3, maxAlloc / 4,
- maxAlloc / 5, maxAlloc / 6, maxAlloc / 7, maxAlloc / 8,
- maxAlloc / 9, maxAlloc / 10, maxAlloc / 11, maxAlloc / 12,
- maxAlloc / 13, maxAlloc / 14, maxAlloc / 15, maxAlloc / 16,
- maxAlloc / 17, maxAlloc / 18, maxAlloc / 19, maxAlloc / 20,
- maxAlloc / 21, maxAlloc / 22, maxAlloc / 23, maxAlloc / 24,
- maxAlloc / 25, maxAlloc / 26, maxAlloc / 27, maxAlloc / 28,
- maxAlloc / 29, maxAlloc / 30, maxAlloc / 31, maxAlloc / 32,
-}
-
-// maxSliceCap returns the maximum capacity for a slice.
-func maxSliceCap(elemsize uintptr) uintptr {
- if elemsize < uintptr(len(maxElems)) {
- return maxElems[elemsize]
- }
- return maxAlloc / elemsize
-}
-
func panicmakeslicelen() {
panic(errorString("makeslice: len out of range"))
}
@@ -53,21 +32,21 @@ func panicmakeslicecap() {
}
func makeslice(et *_type, len, cap int) slice {
- // NOTE: The len > maxElements check here is not strictly necessary,
- // but it produces a 'len out of range' error instead of a 'cap out of range' error
- // when someone does make([]T, bignumber). 'cap out of range' is true too,
- // but since the cap is only being supplied implicitly, saying len is clearer.
- // See issue 4085.
- maxElements := maxSliceCap(et.size)
- if len < 0 || uintptr(len) > maxElements {
- panicmakeslicelen()
- }
-
- if cap < len || uintptr(cap) > maxElements {
+ mem, overflow := math.MulUintptr(et.size, uintptr(cap))
+ if overflow || mem > maxAlloc || len < 0 || len > cap {
+ // NOTE: Produce a 'len out of range' error instead of a
+ // 'cap out of range' error when someone does make([]T, bignumber).
+ // 'cap out of range' is true too, but since the cap is only being
+ // supplied implicitly, saying len is clearer.
+ // See golang.org/issue/4085.
+ mem, overflow := math.MulUintptr(et.size, uintptr(len))
+ if overflow || mem > maxAlloc || len < 0 {
+ panicmakeslicelen()
+ }
panicmakeslicecap()
}
- p := mallocgc(et.size*uintptr(cap), et, true)
+ p := mallocgc(mem, et, true)
return slice{p, len, cap}
}
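
A sketch of the overflow-checked multiply makeslice now relies on, in the spirit of runtime/internal/math.MulUintptr but not the real code; the example assumes a 64-bit platform:

package main

import (
	"fmt"
	"math/bits"
)

func mulUintptr(a, b uintptr) (uintptr, bool) {
	hi, lo := bits.Mul(uint(a), uint(b)) // full-width product
	return uintptr(lo), hi != 0          // overflowed if any high bits are set
}

func main() {
	const maxAlloc = 1 << 47 // assumed allocation limit
	mem, overflow := mulUintptr(1<<40, 1<<30)
	// overflow is true, so a make with a 1<<40-byte element type and
	// cap 1<<30 would panic with "cap out of range" before mallocgc.
	fmt.Println(mem, overflow, mem > maxAlloc)
}
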
@@ -104,10 +83,11 @@ func growslice(et *_type, old slice, cap int) slice {
msanread(old.array, uintptr(old.len*int(et.size)))
}
+ if cap < old.cap {
+ panic(errorString("growslice: cap out of range"))
+ }
+
if et.size == 0 {
- if cap < old.cap {
- panic(errorString("growslice: cap out of range"))
- }
// append should not create a slice with nil pointer but non-zero len.
// We assume that append doesn't need to preserve old.array in this case.
return slice{unsafe.Pointer(&zerobase), old.len, cap}
@@ -169,15 +149,14 @@ func growslice(et *_type, old slice, cap int) slice {
default:
lenmem = uintptr(old.len) * et.size
newlenmem = uintptr(cap) * et.size
- capmem = roundupsize(uintptr(newcap) * et.size)
- overflow = uintptr(newcap) > maxSliceCap(et.size)
+ capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
+ capmem = roundupsize(capmem)
newcap = int(capmem / et.size)
}
- // The check of overflow (uintptr(newcap) > maxSliceCap(et.size))
- // in addition to capmem > _MaxMem is needed to prevent an overflow
- // which can be used to trigger a segfault on 32bit architectures
- // with this example program:
+ // The check of overflow in addition to capmem > maxAlloc is needed
+ // to prevent an overflow which can be used to trigger a segfault
+ // on 32bit architectures with this example program:
//
// type T [1<<27 + 1]int64
//
@@ -188,7 +167,7 @@ func growslice(et *_type, old slice, cap int) slice {
// s = append(s, d, d, d, d)
// print(len(s), "\n")
// }
- if cap < old.cap || overflow || capmem > maxAlloc {
+ if overflow || capmem > maxAlloc {
panic(errorString("growslice: cap out of range"))
}
diff --git a/src/runtime/slice_test.go b/src/runtime/slice_test.go
index c2dfb7afd1..0463fc70a7 100644
--- a/src/runtime/slice_test.go
+++ b/src/runtime/slice_test.go
@@ -10,20 +10,68 @@ import (
const N = 20
-func BenchmarkMakeSlice(b *testing.B) {
- var x []byte
- for i := 0; i < b.N; i++ {
- x = make([]byte, 32)
- _ = x
- }
-}
-
type (
struct24 struct{ a, b, c int64 }
struct32 struct{ a, b, c, d int64 }
struct40 struct{ a, b, c, d, e int64 }
)
+func BenchmarkMakeSlice(b *testing.B) {
+ const length = 2
+ b.Run("Byte", func(b *testing.B) {
+ var x []byte
+ for i := 0; i < b.N; i++ {
+ x = make([]byte, length, 2*length)
+ _ = x
+ }
+ })
+ b.Run("Int16", func(b *testing.B) {
+ var x []int16
+ for i := 0; i < b.N; i++ {
+ x = make([]int16, length, 2*length)
+ _ = x
+ }
+ })
+ b.Run("Int", func(b *testing.B) {
+ var x []int
+ for i := 0; i < b.N; i++ {
+ x = make([]int, length, 2*length)
+ _ = x
+ }
+ })
+ b.Run("Ptr", func(b *testing.B) {
+ var x []*byte
+ for i := 0; i < b.N; i++ {
+ x = make([]*byte, length, 2*length)
+ _ = x
+ }
+ })
+ b.Run("Struct", func(b *testing.B) {
+ b.Run("24", func(b *testing.B) {
+ var x []struct24
+ for i := 0; i < b.N; i++ {
+ x = make([]struct24, length, 2*length)
+ _ = x
+ }
+ })
+ b.Run("32", func(b *testing.B) {
+ var x []struct32
+ for i := 0; i < b.N; i++ {
+ x = make([]struct32, length, 2*length)
+ _ = x
+ }
+ })
+ b.Run("40", func(b *testing.B) {
+ var x []struct40
+ for i := 0; i < b.N; i++ {
+ x = make([]struct40, length, 2*length)
+ _ = x
+ }
+ })
+
+ })
+}
+
func BenchmarkGrowSlice(b *testing.B) {
b.Run("Byte", func(b *testing.B) {
x := make([]byte, 9)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index b815aa859e..65aa7dbd59 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -1279,7 +1279,7 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args
p = add(p, sys.PtrSize)
*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
// Note: the noescape above is needed to keep
- // getStackMap from from "leaking param content:
+ // getStackMap from "leaking param content:
// frame". That leak propagates up to getgcmask, then
// GCMask, then verifyGCInfo, which converts the stack
// gcinfo tests into heap gcinfo tests :(
diff --git a/src/runtime/string.go b/src/runtime/string.go
index d10bd96f43..839e882cdc 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -135,7 +135,8 @@ func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
// and otherwise intrinsified by the compiler.
//
// Some internal compiler optimizations use this function.
-// - Used for m[string(k)] lookup where m is a string-keyed map and k is a []byte.
+// - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)]
+// where k is a []byte and T1 to Tn is a nesting of struct and array literals.
// - Used for "<"+string(b)+">" concatenation where b is []byte.
// - Used for string(b)=="foo" comparison where b is []byte.
func slicebytetostringtmp(b []byte) string {
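
A usage sketch of the optimization described in the comment above: the temporary []byte-to-string conversions below are recognized by the compiler and avoid allocating, including (after this change) conversions nested inside struct and array literals used as map keys:

package main

func main() {
	k := []byte("key")
	m := map[string]int{"key": 1}
	_ = m[string(k)] // no allocation for the temporary string

	type stringstruct struct{ s string }
	ms := map[stringstruct]int{{s: "key"}: 1}
	_ = ms[stringstruct{string(k)}] // newly covered by this change
}
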
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index ed79b69257..bf01099830 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -301,7 +301,7 @@ TEXT runtime·_sigtramp(SB),NOSPLIT,$64
// this might be called in external code context,
// where g is not set.
- MOVB runtime·iscgo(SB), R6
+ MOVBZ runtime·iscgo(SB), R6
CMP R6, $0
BEQ 2(PC)
BL runtime·load_g(SB)
@@ -320,7 +320,7 @@ TEXT runtime·_sigtramp(SB),NOSPLIT,$64
TEXT runtime·cgoSigtramp(SB),NOSPLIT|NOFRAME,$0
// The stack unwinder, presumably written in C, may not be able to
// handle Go frame correctly. So, this function is NOFRAME, and we
- // we save/restore LR manually.
+ // save/restore LR manually.
MOVD LR, R10
// We're coming from C code, initialize essential registers.
diff --git a/src/runtime/syscall_aix.go b/src/runtime/syscall_aix.go
new file mode 100644
index 0000000000..376e22d59a
--- /dev/null
+++ b/src/runtime/syscall_aix.go
@@ -0,0 +1,212 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// This file handles some syscalls from the syscall package,
+// especially those used by forkAndExecInChild, which must not split the stack.
+
+//go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_execve execve "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_fork fork "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setgid setgid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setsid setsid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setuid setuid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setpgid setpgid "libc.a/shr_64.o"
+
+//go:linkname libc_chdir libc_chdir
+//go:linkname libc_chroot libc_chroot
+//go:linkname libc_dup2 libc_dup2
+//go:linkname libc_execve libc_execve
+//go:linkname libc_fcntl libc_fcntl
+//go:linkname libc_fork libc_fork
+//go:linkname libc_ioctl libc_ioctl
+//go:linkname libc_setgid libc_setgid
+//go:linkname libc_setgroups libc_setgroups
+//go:linkname libc_setsid libc_setsid
+//go:linkname libc_setuid libc_setuid
+//go:linkname libc_setpgid libc_setpgid
+
+var (
+ libc_chdir,
+ libc_chroot,
+ libc_dup2,
+ libc_execve,
+ libc_fcntl,
+ libc_fork,
+ libc_ioctl,
+ libc_setgid,
+ libc_setgroups,
+ libc_setsid,
+ libc_setuid,
+ libc_setpgid libFunc
+)
+
+// In syscall_syscall6 and syscall_rawsyscall6, r2 is always 0,
+// as it's never used on AIX.
+// TODO: remove r2 from zsyscall_aix_$GOARCH.go
+
+// Syscall is needed because some packages (like net) need it too.
+// The best way is to return EINVAL and let Go handle the failure.
+// If the syscall can't fail, this function can redirect it to a real syscall.
+//go:linkname syscall_Syscall syscall.Syscall
+//go:nosplit
+func syscall_Syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ return 0, 0, _EINVAL
+}
+
+// This is syscall.RawSyscall, it exists to satisfy some build dependency,
+// but it doesn't work.
+//go:linkname syscall_RawSyscall syscall.RawSyscall
+func syscall_RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ panic("RawSyscall not available on AIX")
+}
+
+//go:linkname syscall_syscall6 syscall.syscall6
+//go:nosplit
+func syscall_syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ c := getg().m.libcall
+ c.fn = uintptr(unsafe.Pointer(fn))
+ c.n = nargs
+ c.args = uintptr(noescape(unsafe.Pointer(&a1)))
+
+ entersyscallblock()
+ asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(&c))
+ exitsyscall()
+ return c.r1, 0, c.err
+}
+
+//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
+//go:nosplit
+func syscall_rawSyscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ c := getg().m.libcall
+ c.fn = uintptr(unsafe.Pointer(fn))
+ c.n = nargs
+ c.args = uintptr(noescape(unsafe.Pointer(&a1)))
+
+ asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(&c))
+
+ return c.r1, 0, c.err
+}
+
+//go:linkname syscall_chdir syscall.chdir
+//go:nosplit
+func syscall_chdir(path uintptr) (err uintptr) {
+ _, err = syscall1(&libc_chdir, path)
+ return
+}
+
+//go:linkname syscall_chroot1 syscall.chroot1
+//go:nosplit
+func syscall_chroot1(path uintptr) (err uintptr) {
+ _, err = syscall1(&libc_chroot, path)
+ return
+}
+
+// like close, but must not split stack, for fork.
+//go:linkname syscall_close syscall.close
+//go:nosplit
+func syscall_close(fd int32) int32 {
+ _, err := syscall1(&libc_close, uintptr(fd))
+ return int32(err)
+}
+
+//go:linkname syscall_dup2child syscall.dup2child
+//go:nosplit
+func syscall_dup2child(old, new uintptr) (val, err uintptr) {
+ val, err = syscall2(&libc_dup2, old, new)
+ return
+}
+
+//go:linkname syscall_execve syscall.execve
+//go:nosplit
+func syscall_execve(path, argv, envp uintptr) (err uintptr) {
+ _, err = syscall3(&libc_execve, path, argv, envp)
+ return
+}
+
+// like exit, but must not split stack, for fork.
+//go:linkname syscall_exit syscall.exit
+//go:nosplit
+func syscall_exit(code uintptr) {
+ syscall1(&libc_exit, code)
+}
+
+//go:linkname syscall_fcntl1 syscall.fcntl1
+//go:nosplit
+func syscall_fcntl1(fd, cmd, arg uintptr) (val, err uintptr) {
+ val, err = syscall3(&libc_fcntl, fd, cmd, arg)
+ return
+}
+
+//go:linkname syscall_forkx syscall.forkx
+//go:nosplit
+func syscall_forkx(flags uintptr) (pid uintptr, err uintptr) {
+ pid, err = syscall1(&libc_fork, flags)
+ return
+}
+
+//go:linkname syscall_getpid syscall.getpid
+//go:nosplit
+func syscall_getpid() (pid, err uintptr) {
+ pid, err = syscall0(&libc_getpid)
+ return
+}
+
+//go:linkname syscall_ioctl syscall.ioctl
+//go:nosplit
+func syscall_ioctl(fd, req, arg uintptr) (err uintptr) {
+ _, err = syscall3(&libc_ioctl, fd, req, arg)
+ return
+}
+
+//go:linkname syscall_setgid syscall.setgid
+//go:nosplit
+func syscall_setgid(gid uintptr) (err uintptr) {
+ _, err = syscall1(&libc_setgid, gid)
+ return
+}
+
+//go:linkname syscall_setgroups1 syscall.setgroups1
+//go:nosplit
+func syscall_setgroups1(ngid, gid uintptr) (err uintptr) {
+ _, err = syscall2(&libc_setgroups, ngid, gid)
+ return
+}
+
+//go:linkname syscall_setsid syscall.setsid
+//go:nosplit
+func syscall_setsid() (pid, err uintptr) {
+ pid, err = syscall0(&libc_setsid)
+ return
+}
+
+//go:linkname syscall_setuid syscall.setuid
+//go:nosplit
+func syscall_setuid(uid uintptr) (err uintptr) {
+ _, err = syscall1(&libc_setuid, uid)
+ return
+}
+
+//go:linkname syscall_setpgid syscall.setpgid
+//go:nosplit
+func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
+ _, err = syscall2(&libc_setpgid, pid, pgid)
+ return
+}
+
+//go:linkname syscall_write1 syscall.write1
+//go:nosplit
+func syscall_write1(fd, buf, nbyte uintptr) (n, err uintptr) {
+ n, err = syscall3(&libc_write, fd, buf, nbyte)
+ return
+}
diff --git a/src/runtime/syscall_solaris.go b/src/runtime/syscall_solaris.go
index 9f05a47892..94e018d479 100644
--- a/src/runtime/syscall_solaris.go
+++ b/src/runtime/syscall_solaris.go
@@ -83,6 +83,13 @@ func syscall_close(fd int32) int32 {
return int32(sysvicall1(&libc_close, uintptr(fd)))
}
+const _F_DUP2FD = 0x9
+
+//go:nosplit
+func syscall_dup2(oldfd, newfd uintptr) (val, err uintptr) {
+ return syscall_fcntl(oldfd, _F_DUP2FD, newfd)
+}
+
//go:nosplit
func syscall_execve(path, argv, envp uintptr) (err uintptr) {
call := libcall{
diff --git a/src/runtime/tls_ppc64x.s b/src/runtime/tls_ppc64x.s
index ed94989b69..c697449282 100644
--- a/src/runtime/tls_ppc64x.s
+++ b/src/runtime/tls_ppc64x.s
@@ -23,9 +23,11 @@
//
// NOTE: setg_gcc<> assume this clobbers only R31.
TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
- MOVB runtime·iscgo(SB), R31
+#ifndef GOOS_aix
+ MOVBZ runtime·iscgo(SB), R31
CMP R31, $0
BEQ nocgo
+#endif
MOVD runtime·tls_g(SB), R31
MOVD g, 0(R13)(R31*1)
diff --git a/src/strconv/atoi.go b/src/strconv/atoi.go
index bebed04820..bbfdb7dc39 100644
--- a/src/strconv/atoi.go
+++ b/src/strconv/atoi.go
@@ -44,7 +44,7 @@ const intSize = 32 << (^uint(0) >> 63)
// IntSize is the size in bits of an int or uint value.
const IntSize = intSize
-const maxUint64 = (1<<64 - 1)
+const maxUint64 = 1<<64 - 1
// ParseUint is like ParseInt but for unsigned numbers.
func ParseUint(s string, base int, bitSize int) (uint64, error) {
diff --git a/src/strconv/example_test.go b/src/strconv/example_test.go
index 15725456e2..2d1a2a9dbf 100644
--- a/src/strconv/example_test.go
+++ b/src/strconv/example_test.go
@@ -265,7 +265,7 @@ func ExampleParseUint() {
}
func ExampleQuote() {
- s := strconv.Quote(`"Fran & Freddie's Diner	☺"`)
+ s := strconv.Quote(`"Fran & Freddie's Diner	☺"`) // there is a tab character inside the string literal
fmt.Println(s)
// Output:
@@ -288,14 +288,50 @@ func ExampleQuoteRuneToASCII() {
// '\u263a'
}
+func ExampleQuoteRuneToGraphic() {
+ s := strconv.QuoteRuneToGraphic('☺')
+ fmt.Println(s)
+
+ s = strconv.QuoteRuneToGraphic('\u263a')
+ fmt.Println(s)
+
+ s = strconv.QuoteRuneToGraphic('\u000a')
+ fmt.Println(s)
+
+ s = strconv.QuoteRuneToGraphic('	') // tab character
+ fmt.Println(s)
+
+ // Output:
+ // '☺'
+ // '☺'
+ // '\n'
+ // '\t'
+}
+
func ExampleQuoteToASCII() {
- s := strconv.QuoteToASCII(`"Fran & Freddie's Diner	☺"`)
+ s := strconv.QuoteToASCII(`"Fran & Freddie's Diner	☺"`) // there is a tab character inside the string literal
fmt.Println(s)
// Output:
// "\"Fran & Freddie's Diner\t\u263a\""
}
+func ExampleQuoteToGraphic() {
+ s := strconv.QuoteToGraphic("☺")
+ fmt.Println(s)
+
+ s = strconv.QuoteToGraphic("This is a \u263a \u000a") // there is a tab character inside the string literal
+ fmt.Println(s)
+
+ s = strconv.QuoteToGraphic(`" This is a ☺ \n "`)
+ fmt.Println(s)
+
+ // Output:
+ // "☺"
+ // "This is a ☺\t\n"
+ // "\" This is a ☺ \\n \""
+}
+
func ExampleUnquote() {
s, err := strconv.Unquote("You can't unquote a string without quotes")
fmt.Printf("%q, %v\n", s, err)
diff --git a/src/strconv/itoa.go b/src/strconv/itoa.go
index 8afe7af251..4aaf57830c 100644
--- a/src/strconv/itoa.go
+++ b/src/strconv/itoa.go
@@ -152,10 +152,14 @@ func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s
}
} else if isPowerOfTwo(base) {
- // It is known that base is a power of two and
- // 2 <= base <= len(digits).
// Use shifts and masks instead of / and %.
- shift := uint(bits.TrailingZeros(uint(base))) & 31
+ // Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36.
+ // The largest power of 2 below or equal to 36 is 32, which is 1 << 5;
+ // i.e., the largest possible shift count is 5. By &-ing that value with
+ // the constant 7 we tell the compiler that the shift count is always
+ // less than 8, which is smaller than any register width. This allows
+ // the compiler to generate better code for the shift operation.
+ shift := uint(bits.TrailingZeros(uint(base))) & 7
b := uint64(base)
m := uint(base) - 1 // == 1<<shift - 1
for u >= b {
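
A worked sketch of the shift/mask fast path above for base 16 (formatting 0xbeef): digits come off u one shift-sized chunk at a time instead of via / and %:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const digits = "0123456789abcdefghijklmnopqrstuvwxyz"
	u, base := uint64(0xbeef), 16
	shift := uint(bits.TrailingZeros(uint(base))) & 7 // 4; &7 bounds the shift for the compiler
	m := uint(base) - 1                               // 0xf, the low-bits mask

	var buf [16]byte
	i := len(buf)
	b := uint64(base)
	for u >= b {
		i--
		buf[i] = digits[uint(u)&m]
		u >>= shift
	}
	i--
	buf[i] = digits[uint(u)]
	fmt.Println(string(buf[i:])) // beef
}
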
diff --git a/src/strings/replace.go b/src/strings/replace.go
index 9ddf5e1e3f..ace0b8d646 100644
--- a/src/strings/replace.go
+++ b/src/strings/replace.go
@@ -459,7 +459,7 @@ func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) {
buf := make([]byte, bufsize)
for len(s) > 0 {
- ncopy := copy(buf, s[:])
+ ncopy := copy(buf, s)
s = s[ncopy:]
for i, b := range buf[:ncopy] {
buf[i] = r[b]
diff --git a/src/sync/runtime.go b/src/sync/runtime.go
index be16bcc8f7..a13d9f6cf1 100644
--- a/src/sync/runtime.go
+++ b/src/sync/runtime.go
@@ -54,7 +54,7 @@ func init() {
}
// Active spinning runtime support.
-// runtime_canSpin returns true is spinning makes sense at the moment.
+// runtime_canSpin returns true if spinning makes sense at the moment.
func runtime_canSpin(i int) bool
// runtime_doSpin does active spinning.
diff --git a/src/syscall/asm_solaris_amd64.s b/src/syscall/asm_solaris_amd64.s
index 6fa041866d..c61e04a42f 100644
--- a/src/syscall/asm_solaris_amd64.s
+++ b/src/syscall/asm_solaris_amd64.s
@@ -23,6 +23,10 @@ TEXT ·chroot1(SB),NOSPLIT,$0
TEXT ·close(SB),NOSPLIT,$0
JMP runtime·syscall_close(SB)
+TEXT ·dup2child(SB),NOSPLIT,$0
+ JMP runtime·syscall_dup2(SB)
+ RET
+
TEXT ·execve(SB),NOSPLIT,$0
JMP runtime·syscall_execve(SB)
diff --git a/src/syscall/dirent.go b/src/syscall/dirent.go
index 26cbbbce2a..5c7af42b0c 100644
--- a/src/syscall/dirent.go
+++ b/src/syscall/dirent.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package syscall
diff --git a/src/syscall/dirent_bsd_test.go b/src/syscall/dirent_bsd_test.go
new file mode 100644
index 0000000000..e5b8357af7
--- /dev/null
+++ b/src/syscall/dirent_bsd_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package syscall_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "syscall"
+ "testing"
+)
+
+func TestDirent(t *testing.T) {
+ const (
+ direntBufSize = 2048
+ filenameMinSize = 11
+ )
+
+ d, err := ioutil.TempDir("", "dirent-test")
+ if err != nil {
+ t.Fatalf("tempdir: %v", err)
+ }
+ defer os.RemoveAll(d)
+ t.Logf("tmpdir: %s", d)
+
+ for i, c := range []byte("0123456789") {
+ name := string(bytes.Repeat([]byte{c}, filenameMinSize+i))
+ err = ioutil.WriteFile(filepath.Join(d, name), nil, 0644)
+ if err != nil {
+ t.Fatalf("writefile: %v", err)
+ }
+ }
+
+ buf := bytes.Repeat([]byte("DEADBEAF"), direntBufSize/8)
+ fd, err := syscall.Open(d, syscall.O_RDONLY, 0)
+ defer syscall.Close(fd)
+ if err != nil {
+ t.Fatalf("syscall.open: %v", err)
+ }
+ n, err := syscall.ReadDirent(fd, buf)
+ if err != nil {
+ t.Fatalf("syscall.readdir: %v", err)
+ }
+ buf = buf[:n]
+
+ names := make([]string, 0, 10)
+ for len(buf) > 0 {
+ var bc int
+ bc, _, names = syscall.ParseDirent(buf, -1, names)
+ buf = buf[bc:]
+ }
+
+ sort.Strings(names)
+ t.Logf("names: %q", names)
+
+ if len(names) != 10 {
+ t.Errorf("got %d names; expected 10", len(names))
+ }
+ for i, name := range names {
+ ord, err := strconv.Atoi(name[:1])
+ if err != nil {
+ t.Fatalf("names[%d] is non-integer %q: %v", i, names[i], err)
+ }
+ if expected := string(strings.Repeat(name[:1], filenameMinSize+ord)); name != expected {
+ t.Errorf("names[%d] is %q (len %d); expected %q (len %d)", i, name, len(name), expected, len(expected))
+ }
+ }
+}
diff --git a/src/syscall/env_unix.go b/src/syscall/env_unix.go
index 1ebc0b17f2..0b6b711a8f 100644
--- a/src/syscall/env_unix.go
+++ b/src/syscall/env_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
// Unix environment variables.
diff --git a/src/syscall/exec_aix_test.go b/src/syscall/exec_aix_test.go
new file mode 100644
index 0000000000..22b752cf27
--- /dev/null
+++ b/src/syscall/exec_aix_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package syscall
+
+import "unsafe"
+
+//go:cgo_import_dynamic libc_Getpgid getpgid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getpgrp getpgrp "libc.a/shr_64.o"
+
+//go:linkname libc_Getpgid libc_Getpgid
+//go:linkname libc_Getpgrp libc_Getpgrp
+
+var (
+ libc_Getpgid,
+ libc_Getpgrp libcFunc
+)
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Getpgrp() (pgrp int) {
+ r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Getpgrp)), 0, 0, 0, 0, 0, 0, 0)
+ pgrp = int(r0)
+ return
+}
+
+var Ioctl = ioctl
diff --git a/src/syscall/exec_solaris.go b/src/syscall/exec_libc.go
similarity index 92%
rename from src/syscall/exec_solaris.go
rename to src/syscall/exec_libc.go
index 9735ae5706..d6d34c04c3 100644
--- a/src/syscall/exec_solaris.go
+++ b/src/syscall/exec_libc.go
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build aix solaris
+
+// This file handles the forkAndExecInChild function for OSes that use
+// libc-based system calls, such as AIX or Solaris.
+
package syscall
import (
@@ -28,6 +32,7 @@ func runtime_AfterForkInChild()
func chdir(path uintptr) (err Errno)
func chroot1(path uintptr) (err Errno)
func close(fd uintptr) (err Errno)
+func dup2child(old uintptr, new uintptr) (val uintptr, err Errno)
func execve(path uintptr, argv uintptr, envp uintptr) (err Errno)
func exit(code uintptr)
func fcntl1(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err Errno)
@@ -43,7 +48,7 @@ func write1(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err Errno)
// syscall defines this global on our behalf to avoid a build dependency on other platforms
func init() {
- execveSolaris = execve
+ execveLibc = execve
}
// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child.
@@ -178,7 +183,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
// Pass 1: look for fd[i] < i and move those up above len(fd)
// so that pass 2 won't stomp on an fd it needs later.
if pipe < nextfd {
- _, err1 = fcntl1(uintptr(pipe), F_DUP2FD, uintptr(nextfd))
+ _, err1 = dup2child(uintptr(pipe), uintptr(nextfd))
if err1 != 0 {
goto childerror
}
@@ -191,11 +196,14 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
if nextfd == pipe { // don't stomp on pipe
nextfd++
}
- _, err1 = fcntl1(uintptr(fd[i]), F_DUP2FD, uintptr(nextfd))
+ _, err1 = dup2child(uintptr(fd[i]), uintptr(nextfd))
+ if err1 != 0 {
+ goto childerror
+ }
+ _, err1 = fcntl1(uintptr(nextfd), F_SETFD, FD_CLOEXEC)
if err1 != 0 {
goto childerror
}
- fcntl1(uintptr(nextfd), F_SETFD, FD_CLOEXEC)
fd[i] = nextfd
nextfd++
}
@@ -218,7 +226,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
}
// The new fd is created NOT close-on-exec,
// which is exactly what we want.
- _, err1 = fcntl1(uintptr(fd[i]), F_DUP2FD, uintptr(i))
+ _, err1 = dup2child(uintptr(fd[i]), uintptr(i))
if err1 != 0 {
goto childerror
}
@@ -242,6 +250,11 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
// Set the controlling TTY to Ctty
if sys.Setctty {
+ // On AIX, TIOCSCTTY is undefined
+ if TIOCSCTTY == 0 {
+ err1 = ENOSYS
+ goto childerror
+ }
err1 = ioctl(uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0)
if err1 != 0 {
goto childerror
diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go
index 9a950ac17f..3b84256b8e 100644
--- a/src/syscall/exec_unix.go
+++ b/src/syscall/exec_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// Fork, exec, wait, etc.
@@ -246,9 +246,9 @@ func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle
func runtime_BeforeExec()
func runtime_AfterExec()
-// execveSolaris is non-nil on Solaris, set to execve in exec_solaris.go; this
+// execveLibc is non-nil on OSes using libc syscalls, set to execve in exec_libc.go; this
// avoids a build dependency for other platforms.
-var execveSolaris func(path uintptr, argv uintptr, envp uintptr) (err Errno)
+var execveLibc func(path uintptr, argv uintptr, envp uintptr) (err Errno)
// Exec invokes the execve(2) system call.
func Exec(argv0 string, argv []string, envv []string) (err error) {
@@ -267,9 +267,9 @@ func Exec(argv0 string, argv []string, envv []string) (err error) {
runtime_BeforeExec()
var err1 Errno
- if runtime.GOOS == "solaris" {
- // RawSyscall should never be used on Solaris.
- err1 = execveSolaris(
+ if runtime.GOOS == "solaris" || runtime.GOOS == "aix" {
+ // RawSyscall should never be used on Solaris or AIX.
+ err1 = execveLibc(
uintptr(unsafe.Pointer(argv0p)),
uintptr(unsafe.Pointer(&argvp[0])),
uintptr(unsafe.Pointer(&envvp[0])))
diff --git a/src/syscall/exec_unix_test.go b/src/syscall/exec_unix_test.go
index 9bb95c0f39..33614f5221 100644
--- a/src/syscall/exec_unix_test.go
+++ b/src/syscall/exec_unix_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package syscall_test
diff --git a/src/syscall/flock_aix.go b/src/syscall/flock_aix.go
new file mode 100644
index 0000000000..9745236dcb
--- /dev/null
+++ b/src/syscall/flock_aix.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+import "unsafe"
+
+// On AIX, there is no flock() system call, so we emulate it.
+// Moreover, we can't call the default fcntl syscall because the arguments
+// must be integers and it's not possible to pass a pointer (lk)
+// as an int value.
+// It's easier to call syscall6 than to transform fcntl for every GOOS.
+func fcntlFlock(fd, cmd int, lk *Flock_t) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func Flock(fd int, op int) (err error) {
+ lk := &Flock_t{}
+ if (op & LOCK_UN) != 0 {
+ lk.Type = F_UNLCK
+ } else if (op & LOCK_EX) != 0 {
+ lk.Type = F_WRLCK
+ } else if (op & LOCK_SH) != 0 {
+ lk.Type = F_RDLCK
+ } else {
+ return nil
+ }
+ if (op & LOCK_NB) != 0 {
+ err = fcntlFlock(fd, F_SETLK, lk)
+ if err != nil && (err == EAGAIN || err == EACCES) {
+ return EWOULDBLOCK
+ }
+ return err
+ }
+ return fcntlFlock(fd, F_SETLKW, lk)
+}
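
A usage sketch for the emulated Flock above (the lock-file path is hypothetical); because LOCK_NB maps to F_SETLK, contention surfaces as EWOULDBLOCK rather than blocking:

package main

import "syscall"

func main() {
	fd, err := syscall.Open("/tmp/lockfile", syscall.O_RDWR|syscall.O_CREAT, 0644)
	if err != nil {
		return
	}
	defer syscall.Close(fd)
	if err := syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB); err == syscall.EWOULDBLOCK {
		// another process holds the lock; back off instead of blocking
	}
}
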
diff --git a/src/syscall/forkpipe.go b/src/syscall/forkpipe.go
index 71890a29ba..d9999cb8b8 100644
--- a/src/syscall/forkpipe.go
+++ b/src/syscall/forkpipe.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly solaris
+// +build aix darwin dragonfly solaris
package syscall
diff --git a/src/syscall/fs_js.go b/src/syscall/fs_js.go
index 00d6c76979..22a055a040 100644
--- a/src/syscall/fs_js.go
+++ b/src/syscall/fs_js.go
@@ -81,15 +81,15 @@ func Open(path string, openmode int, perm uint32) (int, error) {
return 0, errors.New("syscall.Open: O_SYNC is not supported by js/wasm")
}
- jsFD, err := fsCall("openSync", path, flags, perm)
+ jsFD, err := fsCall("open", path, flags, perm)
if err != nil {
return 0, err
}
fd := jsFD.Int()
var entries []string
- if stat, err := fsCall("fstatSync", fd); err == nil && stat.Call("isDirectory").Bool() {
- dir, err := fsCall("readdirSync", path)
+ if stat, err := fsCall("fstat", fd); err == nil && stat.Call("isDirectory").Bool() {
+ dir, err := fsCall("readdir", path)
if err != nil {
return 0, err
}
@@ -113,7 +113,7 @@ func Close(fd int) error {
filesMu.Lock()
delete(files, fd)
filesMu.Unlock()
- _, err := fsCall("closeSync", fd)
+ _, err := fsCall("close", fd)
return err
}
@@ -125,7 +125,7 @@ func Mkdir(path string, perm uint32) error {
if err := checkPath(path); err != nil {
return err
}
- _, err := fsCall("mkdirSync", path, perm)
+ _, err := fsCall("mkdir", path, perm)
return err
}
@@ -182,7 +182,7 @@ func Stat(path string, st *Stat_t) error {
if err := checkPath(path); err != nil {
return err
}
- jsSt, err := fsCall("statSync", path)
+ jsSt, err := fsCall("stat", path)
if err != nil {
return err
}
@@ -194,7 +194,7 @@ func Lstat(path string, st *Stat_t) error {
if err := checkPath(path); err != nil {
return err
}
- jsSt, err := fsCall("lstatSync", path)
+ jsSt, err := fsCall("lstat", path)
if err != nil {
return err
}
@@ -203,7 +203,7 @@ func Lstat(path string, st *Stat_t) error {
}
func Fstat(fd int, st *Stat_t) error {
- jsSt, err := fsCall("fstatSync", fd)
+ jsSt, err := fsCall("fstat", fd)
if err != nil {
return err
}
@@ -215,7 +215,7 @@ func Unlink(path string) error {
if err := checkPath(path); err != nil {
return err
}
- _, err := fsCall("unlinkSync", path)
+ _, err := fsCall("unlink", path)
return err
}
@@ -223,7 +223,7 @@ func Rmdir(path string) error {
if err := checkPath(path); err != nil {
return err
}
- _, err := fsCall("rmdirSync", path)
+ _, err := fsCall("rmdir", path)
return err
}
@@ -231,12 +231,12 @@ func Chmod(path string, mode uint32) error {
if err := checkPath(path); err != nil {
return err
}
- _, err := fsCall("chmodSync", path, mode)
+ _, err := fsCall("chmod", path, mode)
return err
}
func Fchmod(fd int, mode uint32) error {
- _, err := fsCall("fchmodSync", fd, mode)
+ _, err := fsCall("fchmod", fd, mode)
return err
}
@@ -267,7 +267,7 @@ func UtimesNano(path string, ts []Timespec) error {
}
atime := ts[0].Sec
mtime := ts[1].Sec
- _, err := fsCall("utimesSync", path, atime, mtime)
+ _, err := fsCall("utimes", path, atime, mtime)
return err
}
@@ -278,7 +278,7 @@ func Rename(from, to string) error {
if err := checkPath(to); err != nil {
return err
}
- _, err := fsCall("renameSync", from, to)
+ _, err := fsCall("rename", from, to)
return err
}
@@ -286,12 +286,12 @@ func Truncate(path string, length int64) error {
if err := checkPath(path); err != nil {
return err
}
- _, err := fsCall("truncateSync", path, length)
+ _, err := fsCall("truncate", path, length)
return err
}
func Ftruncate(fd int, length int64) error {
- _, err := fsCall("ftruncateSync", fd, length)
+ _, err := fsCall("ftruncate", fd, length)
return err
}
@@ -299,7 +299,7 @@ func Getcwd(buf []byte) (n int, err error) {
defer recoverErr(&err)
cwd := jsProcess.Call("cwd").String()
n = copy(buf, cwd)
- return n, nil
+ return
}
func Chdir(path string) (err error) {
@@ -323,7 +323,7 @@ func Readlink(path string, buf []byte) (n int, err error) {
if err := checkPath(path); err != nil {
return 0, err
}
- dst, err := fsCall("readlinkSync", path)
+ dst, err := fsCall("readlink", path)
if err != nil {
return 0, err
}
@@ -338,7 +338,7 @@ func Link(path, link string) error {
if err := checkPath(link); err != nil {
return err
}
- _, err := fsCall("linkSync", path, link)
+ _, err := fsCall("link", path, link)
return err
}
@@ -349,12 +349,12 @@ func Symlink(path, link string) error {
if err := checkPath(link); err != nil {
return err
}
- _, err := fsCall("symlinkSync", path, link)
+ _, err := fsCall("symlink", path, link)
return err
}
func Fsync(fd int) error {
- _, err := fsCall("fsyncSync", fd)
+ _, err := fsCall("fsync", fd)
return err
}
@@ -371,7 +371,7 @@ func Read(fd int, b []byte) (int, error) {
}
a := js.TypedArrayOf(b)
- n, err := fsCall("readSync", fd, a, 0, len(b))
+ n, err := fsCall("read", fd, a, 0, len(b), nil)
a.Release()
if err != nil {
return 0, err
@@ -394,7 +394,7 @@ func Write(fd int, b []byte) (int, error) {
}
a := js.TypedArrayOf(b)
- n, err := fsCall("writeSync", fd, a, 0, len(b))
+ n, err := fsCall("write", fd, a, 0, len(b), nil)
a.Release()
if err != nil {
return 0, err
@@ -406,7 +406,7 @@ func Write(fd int, b []byte) (int, error) {
func Pread(fd int, b []byte, offset int64) (int, error) {
a := js.TypedArrayOf(b)
- n, err := fsCall("readSync", fd, a, 0, len(b), offset)
+ n, err := fsCall("read", fd, a, 0, len(b), offset)
a.Release()
if err != nil {
return 0, err
@@ -416,7 +416,7 @@ func Pread(fd int, b []byte, offset int64) (int, error) {
func Pwrite(fd int, b []byte, offset int64) (int, error) {
a := js.TypedArrayOf(b)
- n, err := fsCall("writeSync", fd, a, 0, len(b), offset)
+ n, err := fsCall("write", fd, a, 0, len(b), offset)
a.Release()
if err != nil {
return 0, err
@@ -467,10 +467,31 @@ func Pipe(fd []int) error {
return ENOSYS
}
-func fsCall(name string, args ...interface{}) (res js.Value, err error) {
- defer recoverErr(&err)
- res = jsFS.Call(name, args...)
- return
+func fsCall(name string, args ...interface{}) (js.Value, error) {
+ type callResult struct {
+ val js.Value
+ err error
+ }
+
+ c := make(chan callResult)
+ jsFS.Call(name, append(args, js.NewCallback(func(args []js.Value) {
+ var res callResult
+
+ if len(args) >= 1 { // on Node.js 8, fs.utimes calls the callback without any arguments
+ if jsErr := args[0]; jsErr != js.Null() {
+ res.err = mapJSError(jsErr)
+ }
+ }
+
+ res.val = js.Undefined()
+ if len(args) >= 2 {
+ res.val = args[1]
+ }
+
+ c <- res
+ }))...)
+ res := <-c
+ return res.val, res.err
}
// checkPath checks that the path is not empty and that it contains no null characters.
@@ -492,10 +513,15 @@ func recoverErr(errPtr *error) {
if !ok {
panic(err)
}
- errno, ok := errnoByCode[jsErr.Get("code").String()]
- if !ok {
- panic(err)
- }
- *errPtr = errnoErr(Errno(errno))
+ *errPtr = mapJSError(jsErr.Value)
}
}
+
+// mapJSError maps an error given by Node.js to the appropriate Go error
+func mapJSError(jsErr js.Value) error {
+ errno, ok := errnoByCode[jsErr.Get("code").String()]
+ if !ok {
+ panic(jsErr)
+ }
+ return errnoErr(Errno(errno))
+}
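
The switch from the fs.*Sync functions to their callback-based counterparts is what lets the JS event loop (and other goroutines) run while a file operation is pending: fsCall parks the calling goroutine on a channel until Node.js invokes the callback. A stripped-down sketch of the same pattern, with a hypothetical asynchronous API standing in for jsFS.Call:

package main

import "fmt"

// await turns a callback-style API into a blocking call: it registers a
// callback that sends on a channel, then blocks the goroutine on a receive
// (hypothetical helper mirroring the shape of fsCall above).
func await(start func(done func(v int, err error))) (int, error) {
	type result struct {
		v   int
		err error
	}
	c := make(chan result)
	start(func(v int, err error) { c <- result{v, err} })
	r := <-c
	return r.v, r.err
}

func main() {
	v, err := await(func(done func(int, error)) {
		go done(42, nil) // stands in for the JS event loop firing the callback
	})
	fmt.Println(v, err) // 42 <nil>
}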
diff --git a/src/syscall/js/js.go b/src/syscall/js/js.go
index 336586ca2d..9d826c3886 100644
--- a/src/syscall/js/js.go
+++ b/src/syscall/js/js.go
@@ -16,15 +16,17 @@ import (
)
// ref is used to identify a JavaScript value, since the value itself can not be passed to WebAssembly.
-// A JavaScript number (64-bit float, except NaN) is represented by its IEEE 754 binary representation.
+//
+// The JavaScript value "undefined" is represented by the value 0.
+// A JavaScript number (64-bit float, except 0 and NaN) is represented by its IEEE 754 binary representation.
// All other values are represented as an IEEE 754 binary representation of NaN with bits 0-31 used as
// an ID and bits 32-33 used to differentiate between string, symbol, function and object.
type ref uint64
-// nanHead are the upper 32 bits of a ref which are set if the value is not a JavaScript number or NaN itself.
+// nanHead are the upper 32 bits of a ref which are set if the value is not encoded as an IEEE 754 number (see above).
const nanHead = 0x7FF80000
-// Value represents a JavaScript value.
+// Value represents a JavaScript value. The zero value is the JavaScript value "undefined".
type Value struct {
ref ref
}
@@ -38,6 +40,9 @@ func predefValue(id uint32) Value {
}
func floatValue(f float64) Value {
+ if f == 0 {
+ return valueZero
+ }
if f != f {
return valueNaN
}
@@ -56,8 +61,9 @@ func (e Error) Error() string {
}
var (
+ valueUndefined = Value{ref: 0}
valueNaN = predefValue(0)
- valueUndefined = predefValue(1)
+ valueZero = predefValue(1)
valueNull = predefValue(2)
valueTrue = predefValue(3)
valueFalse = predefValue(4)
@@ -318,13 +324,18 @@ func (v Value) New(args ...interface{}) Value {
func valueNew(v ref, args []ref) (ref, bool)
func (v Value) isNumber() bool {
- return v.ref>>32&nanHead != nanHead || v.ref == valueNaN.ref
+ return v.ref == valueZero.ref ||
+ v.ref == valueNaN.ref ||
+ (v.ref != valueUndefined.ref && v.ref>>32&nanHead != nanHead)
}
func (v Value) float(method string) float64 {
if !v.isNumber() {
panic(&ValueError{method, v.Type()})
}
+ if v.ref == valueZero.ref {
+ return 0
+ }
return *(*float64)(unsafe.Pointer(&v.ref))
}
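
A self-contained illustration of the ref encoding documented above: the constant and the bit test mirror the ones in js.go, while the ID 42 is a made-up payload for a non-number value.

package main

import (
	"fmt"
	"math"
)

const nanHead = 0x7FF80000

func main() {
	// An ordinary number is stored directly as its IEEE 754 bits.
	num := math.Float64bits(3.5)
	fmt.Printf("number: %#016x isNumber=%v\n", num, num>>32&nanHead != nanHead)

	// A non-number value reuses a NaN payload: the upper 32 bits are nanHead
	// and the lower 32 bits carry the value's ID (42 here, hypothetical).
	ref := uint64(nanHead)<<32 | 42
	fmt.Printf("ref:    %#016x isNumber=%v\n", ref, ref>>32&nanHead != nanHead)
}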
diff --git a/src/syscall/js/js_test.go b/src/syscall/js/js_test.go
index 9cc931a31d..ed39fe3714 100644
--- a/src/syscall/js/js_test.go
+++ b/src/syscall/js/js_test.go
@@ -22,6 +22,7 @@ var dummys = js.Global().Call("eval", `({
add: function(a, b) {
return a + b;
},
+ zero: 0,
NaN: NaN,
})`)
@@ -74,6 +75,9 @@ func TestInt(t *testing.T) {
if dummys.Get("someInt") != dummys.Get("someInt") {
t.Errorf("same value not equal")
}
+ if got := dummys.Get("zero").Int(); got != 0 {
+ t.Errorf("got %#v, want %#v", got, 0)
+ }
}
func TestIntConversion(t *testing.T) {
@@ -237,6 +241,9 @@ func TestType(t *testing.T) {
if got, want := js.ValueOf(true).Type(), js.TypeBoolean; got != want {
t.Errorf("got %s, want %s", got, want)
}
+ if got, want := js.ValueOf(0).Type(), js.TypeNumber; got != want {
+ t.Errorf("got %s, want %s", got, want)
+ }
if got, want := js.ValueOf(42).Type(), js.TypeNumber; got != want {
t.Errorf("got %s, want %s", got, want)
}
@@ -269,6 +276,13 @@ func TestValueOf(t *testing.T) {
}
}
+func TestZeroValue(t *testing.T) {
+ var v js.Value
+ if v != js.Undefined() {
+ t.Error("zero js.Value is not js.Undefined()")
+ }
+}
+
func TestCallback(t *testing.T) {
c := make(chan struct{})
cb := js.NewCallback(func(args []js.Value) {
diff --git a/src/syscall/mkall.sh b/src/syscall/mkall.sh
index b381b93161..b783921d1a 100755
--- a/src/syscall/mkall.sh
+++ b/src/syscall/mkall.sh
@@ -115,6 +115,11 @@ _* | *_ | _)
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
exit 1
;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="./mksyscall_libc.pl -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
darwin_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
@@ -301,7 +306,7 @@ plan9_386)
mktypes="XXX"
;;
solaris_amd64)
- mksyscall="./mksyscall_solaris.pl"
+ mksyscall="./mksyscall_libc.pl -solaris"
mkerrors="$mkerrors -m64"
mksysnum=
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
@@ -327,5 +332,9 @@ esac
if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
- if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then
+ # ztypes_$GOOSARCH.go can be erased before "go run mkpost.go" is executed,
+ # in which case "go run" would recompile the syscall package against an empty
+ # ztypes file and fail. Generate into a temporary file and rename it into place.
+ echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go.NEW && mv ztypes_$GOOSARCH.go.NEW ztypes_$GOOSARCH.go";
+ fi
) | $run
diff --git a/src/syscall/mkerrors.sh b/src/syscall/mkerrors.sh
index 93d6f7d2b6..d5880dcaf2 100755
--- a/src/syscall/mkerrors.sh
+++ b/src/syscall/mkerrors.sh
@@ -20,6 +20,16 @@ fi
uname=$(uname)
+includes_AIX='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
includes_Darwin='
#define _DARWIN_C_SOURCE
#define KERNEL
diff --git a/src/syscall/mksyscall_solaris.pl b/src/syscall/mksyscall_libc.pl
similarity index 86%
rename from src/syscall/mksyscall_solaris.pl
rename to src/syscall/mksyscall_libc.pl
index 9172975914..5ceedc812a 100755
--- a/src/syscall/mksyscall_solaris.pl
+++ b/src/syscall/mksyscall_libc.pl
@@ -19,10 +19,12 @@
use strict;
-my $cmdline = "mksyscall_solaris.pl " . join(' ', @ARGV);
+my $cmdline = "mksyscall_libc.pl " . join(' ', @ARGV);
my $errors = 0;
my $_32bit = "";
my $tags = ""; # build tags
+my $aix = 0;
+my $solaris = 0;
binmode STDOUT;
@@ -33,14 +35,23 @@ if($ARGV[0] eq "-b32") {
$_32bit = "little-endian";
shift;
}
+if($ARGV[0] eq "-aix") {
+ $aix = 1;
+ shift;
+}
+if($ARGV[0] eq "-solaris") {
+ $solaris = 1;
+ shift;
+}
if($ARGV[0] eq "-tags") {
shift;
$tags = $ARGV[0];
shift;
}
+
if($ARGV[0] =~ /^-/) {
- print STDERR "usage: mksyscall_solaris.pl [-b32 | -l32] [-tags x,y] [file ...]\n";
+ print STDERR "usage: mksyscall_libc.pl [-b32 | -l32] [-aix | -solaris] [-tags x,y] [file ...]\n";
exit 1;
}
@@ -95,9 +106,28 @@ while(<>) {
my @in = parseparamlist($in);
my @out = parseparamlist($out);
+ # Try in vain to keep people from editing this file.
+ # The theory is that they jump into the middle of the file
+ # without reading the header.
+ $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
+
# Shared object file name.
- if($modname eq "") {
- $modname = "libc";
+ if($aix) {
+ if($modname eq "") {
+ $modname = "libc.a/shr_64.o";
+ } else {
+ print STDERR "$func: only syscalls using libc are available\n";
+ $errors = 1;
+ next;
+ }
+
+ }
+ if($solaris) {
+ if($modname eq "") {
+ $modname = "libc";
+ }
+ $modname .= ".so";
+
}
# System call name.
@@ -114,7 +144,7 @@ while(<>) {
$sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
# Runtime import of function to allow cross-platform builds.
- $dynimports .= "//go:cgo_import_dynamic ${sysvarname} ${sysname} \"$modname.so\"\n";
+ $dynimports .= "//go:cgo_import_dynamic ${sysvarname} ${sysname} \"$modname\"\n";
# Link symbol to proc address variable.
$linknames .= "//go:linkname ${sysvarname} ${sysvarname}\n";
# Library proc address variable.
@@ -184,10 +214,21 @@ while(<>) {
}
my $nargs = @args;
+ my $asmfuncname="";
+ my $asmrawfuncname="";
+
+ if($aix){
+ $asmfuncname="syscall6";
+ $asmrawfuncname="rawSyscall6";
+ } else {
+ $asmfuncname="sysvicall6";
+ $asmrawfuncname="rawSysvicall6";
+ }
+
# Determine which form to use; pad args with zeros.
- my $asm = "${syscalldot}sysvicall6";
+ my $asm = "${syscalldot}${asmfuncname}";
if ($nonblock) {
- $asm = "${syscalldot}rawSysvicall6";
+ $asm = "${syscalldot}${asmrawfuncname}";
}
if(@args <= 6) {
while(@args < 6) {
diff --git a/src/syscall/mksyscall_windows.go b/src/syscall/mksyscall_windows.go
index 5fd3a756f8..dd84e33c0f 100644
--- a/src/syscall/mksyscall_windows.go
+++ b/src/syscall/mksyscall_windows.go
@@ -22,7 +22,7 @@ like func declarations if //sys is replaced by func, but:
* If the return parameter is an error number, it must be named err.
-* If go func name needs to be different from it's winapi dll name,
+* If go func name needs to be different from its winapi dll name,
the winapi name could be specified at the end, after "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
diff --git a/src/syscall/mmap_unix_test.go b/src/syscall/mmap_unix_test.go
index 01f7783022..d0b3644b59 100644
--- a/src/syscall/mmap_unix_test.go
+++ b/src/syscall/mmap_unix_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd
+// +build aix darwin dragonfly freebsd linux netbsd openbsd
package syscall_test
diff --git a/src/syscall/route_freebsd.go b/src/syscall/route_freebsd.go
index 2c2de7474a..2b47faff42 100644
--- a/src/syscall/route_freebsd.go
+++ b/src/syscall/route_freebsd.go
@@ -6,11 +6,7 @@ package syscall
import "unsafe"
-// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.
-var freebsdVersion uint32
-
func init() {
- freebsdVersion, _ = SysctlUint32("kern.osreldate")
conf, _ := Sysctl("kern.conftxt")
for i, j := 0, 0; j < len(conf); j++ {
if conf[j] != '\n' {
diff --git a/src/syscall/route_freebsd_32bit.go b/src/syscall/route_freebsd_32bit.go
index ec6f6b7f8b..aed8682237 100644
--- a/src/syscall/route_freebsd_32bit.go
+++ b/src/syscall/route_freebsd_32bit.go
@@ -22,7 +22,7 @@ func (any *anyMessage) parseInterfaceMessage(b []byte) *InterfaceMessage {
// FreeBSD 10 and beyond have a restructured mbuf
// packet header view.
// See https://svnweb.freebsd.org/base?view=revision&revision=254804.
- if freebsdVersion >= 1000000 {
+ if supportsABI(1000000) {
m := (*ifMsghdr)(unsafe.Pointer(any))
p.Header.Data.Hwassist = uint32(m.Data.Hwassist)
p.Header.Data.Epoch = m.Data.Epoch
diff --git a/src/syscall/sockcmsg_unix.go b/src/syscall/sockcmsg_unix.go
index 5712bf13f2..5020033bad 100644
--- a/src/syscall/sockcmsg_unix.go
+++ b/src/syscall/sockcmsg_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// Socket control messages
diff --git a/src/syscall/syscall_aix.go b/src/syscall/syscall_aix.go
new file mode 100644
index 0000000000..bddc590c34
--- /dev/null
+++ b/src/syscall/syscall_aix.go
@@ -0,0 +1,651 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// AIX system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and
+// wrap it in our own nicer implementation.
+
+package syscall
+
+import (
+ "unsafe"
+)
+
+// Implemented in runtime/syscall_aix.go.
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+
+// Constants expected by the package but not supported on AIX.
+const (
+ _ = iota
+ TIOCSCTTY
+ F_DUPFD_CLOEXEC
+ SYS_EXECVE
+ SYS_FCNTL
+)
+
+/*
+ * Wrapped
+ */
+
+// fcntl must never be called with cmd=F_DUP2FD, because it doesn't work on AIX.
+// There is no easy way to provide a custom fcntl while keeping the //sys
+// declaration, because the fcntl name is needed for its libc symbol, which is
+// resolved by the generation script. But since fcntl is currently not exported
+// and is never called with F_DUP2FD, this doesn't matter.
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+//sys dup2(old int, new int) (val int, err error)
+
+//sysnb pipe(p *[2]_C_int) (err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sys readlink(path string, buf []byte, bufSize uint64) (n int, err error)
+func Readlink(path string, buf []byte) (n int, err error) {
+ s := uint64(len(buf))
+ return readlink(path, buf, s)
+}
+
+//sys utimes(path string, times *[2]Timeval) (err error)
+func Utimes(path string, tv []Timeval) error {
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
+func UtimesNano(path string, ts []Timespec) error {
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+//sys unlinkat(dirfd int, path string, flags int) (err error)
+func Unlinkat(dirfd int, path string) (err error) {
+ return unlinkat(dirfd, path, 0)
+}
+
+//sys getcwd(buf *byte, size uint64) (err error)
+
+const ImplementsGetwd = true
+
+func Getwd() (ret string, err error) {
+ for len := uint64(4096); ; len *= 2 {
+ b := make([]byte, len)
+ err := getcwd(&b[0], len)
+ if err == nil {
+ i := 0
+ for b[i] != 0 {
+ i++
+ }
+ return string(b[0:i]), nil
+ }
+ if err != ERANGE {
+ return "", err
+ }
+ }
+}
+
+func Getcwd(buf []byte) (n int, err error) {
+ err = getcwd(&buf[0], uint64(len(buf)))
+ if err == nil {
+ i := 0
+ for buf[i] != 0 {
+ i++
+ }
+ n = i + 1
+ }
+ return
+}
+
+//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error)
+//sysnb setgroups(ngid int, gid *_Gid_t) (err error)
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 16 on BSD.
+ if n < 0 || n > 1000 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+func direntIno(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
+}
+
+func direntReclen(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
+}
+
+func direntNamlen(buf []byte) (uint64, bool) {
+ reclen, ok := direntReclen(buf)
+ if !ok {
+ return 0, false
+ }
+ return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
+}
+
+func Gettimeofday(tv *Timeval) (err error) {
+ err = gettimeofday(tv, nil)
+ return
+}
+
+// TODO: sendfile is not yet implemented on AIX; return ENOSYS for now.
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ return -1, ENOSYS
+}
+
+//sys getdirent(fd int, buf []byte) (n int, err error)
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ return getdirent(fd, buf)
+}
+
+//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error)
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ var r Pid_t
+ err = ERESTART
+ // On AIX, wait4 may fail with ERESTART while the process is still
+ // active; keep retrying until it succeeds or fails with another error.
+ for err == ERESTART {
+ r, err = wait4(Pid_t(pid), &status, options, rusage)
+ }
+ wpid = int(r)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+/*
+ * Socket
+ */
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys Getkerninfo(op int32, where uintptr, size uintptr, arg int64) (i int32, err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sys socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys Shutdown(s int, how int) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
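+ // sockaddr_in wants the port in network byte order (big-endian), hence the manual byte swap.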
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *RawSockaddrUnix) setLen(n int) {
+ sa.Len = uint8(3 + n) // 2 for Family, Len; 1 for NUL.
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n > len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ sa.raw.setLen(n)
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = uint8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if err != nil {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ var sockType int
+ sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
+ if err != nil {
+ return
+ }
+ // receive at least one normal byte
+ if sockType != SOCK_DGRAM && len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ var sockType int
+ sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
+ if err != nil {
+ return 0, err
+ }
+ // send at least one normal byte
+ if sockType != SOCK_DGRAM && len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+func (sa *RawSockaddrUnix) getLen() (int, error) {
+ // Some versions of AIX have a bug in getsockname (see IV78655).
+ // We can't rely on sa.Len being set correctly.
+ n := SizeofSockaddrUnix - 3 // subtract leading Family and Len bytes and the terminating NUL.
+ for i := 0; i < n; i++ {
+ if sa.Path[i] == 0 {
+ n = i
+ break
+ }
+ }
+ return n, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ n, err := pp.getLen()
+ if err != nil {
+ return nil, err
+ }
+ bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))
+ sa.Name = string(bytes[0:n])
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+/*
+ * Wait
+ */
+
+type WaitStatus uint32
+
+func (w WaitStatus) Stopped() bool { return w&0x40 != 0 }
+func (w WaitStatus) StopSignal() Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return Signal(w>>8) & 0xFF
+}
+
+func (w WaitStatus) Exited() bool { return w&0xFF == 0 }
+func (w WaitStatus) ExitStatus() int {
+ if !w.Exited() {
+ return -1
+ }
+ return int((w >> 8) & 0xFF)
+}
+
+func (w WaitStatus) Signaled() bool { return w&0x40 == 0 && w&0xFF != 0 }
+func (w WaitStatus) Signal() Signal {
+ if !w.Signaled() {
+ return -1
+ }
+ return Signal(w>>16) & 0xFF
+}
+
+func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 }
+
+func (w WaitStatus) CoreDump() bool { return w&0x200 == 0 }
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+/*
+ * ptrace
+ */
+
+//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
+//sys ptrace64(request int, id int64, addr int64, data int, buff uintptr) (err error)
+
+func raw_ptrace(request int, pid int, addr *byte, data *byte) Errno {
+ if request == PTRACE_TRACEME {
+ // Convert to AIX ptrace call.
+ err := ptrace64(PT_TRACE_ME, 0, 0, 0, 0)
+ if err != nil {
+ return err.(Errno)
+ }
+ return 0
+ }
+ return ENOSYS
+}
+
+func ptracePeek(pid int, addr uintptr, out []byte) (count int, err error) {
+ n := 0
+ for len(out) > 0 {
+ bsize := len(out)
+ if bsize > 1024 {
+ bsize = 1024
+ }
+ err = ptrace64(PT_READ_BLOCK, int64(pid), int64(addr), bsize, uintptr(unsafe.Pointer(&out[0])))
+ if err != nil {
+ return 0, err
+ }
+ addr += uintptr(bsize)
+ n += bsize
+ out = out[bsize:] // advance past the chunk just read; out[n:] would skip too far
+ }
+ return n, nil
+}
+
+func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(pid, addr, out)
+}
+
+func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(pid, addr, out)
+}
+
+func ptracePoke(pid int, addr uintptr, data []byte) (count int, err error) {
+ n := 0
+ for len(data) > 0 {
+ bsize := len(data)
+ if bsize > 1024 {
+ bsize = 1024
+ }
+ err = ptrace64(PT_WRITE_BLOCK, int64(pid), int64(addr), bsize, uintptr(unsafe.Pointer(&data[0])))
+ if err != nil {
+ return 0, err
+ }
+ addr += uintptr(bsize)
+ n += bsize
+ data = data[bsize:] // advance past the chunk just written; data[n:] would skip too far
+ }
+ return n, nil
+}
+
+func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(pid, addr, data)
+}
+
+func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(pid, addr, data)
+}
+
+func PtraceCont(pid int, signal int) (err error) {
+ return ptrace64(PT_CONTINUE, int64(pid), 1, signal, 0)
+}
+
+func PtraceSingleStep(pid int) (err error) { return ptrace64(PT_STEP, int64(pid), 1, 0, 0) }
+
+func PtraceAttach(pid int) (err error) { return ptrace64(PT_ATTACH, int64(pid), 0, 0, 0) }
+
+func PtraceDetach(pid int) (err error) { return ptrace64(PT_DETACH, int64(pid), 0, 0, 0) }
+
+/*
+ * Direct access
+ */
+
+//sys Acct(path string) (err error)
+//sys Chdir(path string) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Fsync(fd int) (err error)
+//sysnb Getgid() (gid int)
+//sysnb Getpid() (pid int)
+//sys Geteuid() (euid int)
+//sys Getegid() (egid int)
+//sys Getppid() (ppid int)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sys Kill(pid int, signum Signal) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Reboot(how int) (err error)
+//sys Rename(from string, to string) (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Unlink(path string) (err error)
+//sysnb Uname(buf *Utsname) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+
+//sys gettimeofday(tv *Timeval, tzp *Timezone) (err error)
+
+func setTimespec(sec, nsec int64) Timespec {
+ return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+ return Timeval{Sec: sec, Usec: int32(usec)}
+}
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_read)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Map
+ */
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
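
The Wait4 wrapper above inlines a retry-on-ERESTART loop; the same idiom in isolation, as a sketch for platforms that define syscall.ERESTART (the helper name is made up):

package main

import "syscall"

// retryRestart re-issues an interruptible call while the kernel reports
// ERESTART, the idiom Wait4 inlines for wait4 above.
func retryRestart(call func() error) error {
	err := call()
	for err == syscall.ERESTART {
		err = call()
	}
	return err
}

func main() {
	tries := 0
	_ = retryRestart(func() error {
		tries++
		if tries < 3 {
			return syscall.ERESTART // simulate an interrupted wait4
		}
		return nil
	})
}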
diff --git a/src/syscall/syscall_aix_ppc64.go b/src/syscall/syscall_aix_ppc64.go
new file mode 100644
index 0000000000..21ad5bc296
--- /dev/null
+++ b/src/syscall/syscall_aix_ppc64.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/src/syscall/syscall_freebsd.go b/src/syscall/syscall_freebsd.go
index d5738ba1c1..9ae024131d 100644
--- a/src/syscall/syscall_freebsd.go
+++ b/src/syscall/syscall_freebsd.go
@@ -12,7 +12,34 @@
package syscall
-import "unsafe"
+import (
+ "sync"
+ "unsafe"
+)
+
+const (
+ _SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); }
+ _SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \
+ _SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \
+ _SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \
+ _SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \
+ _SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \
+ _SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \
+)
+
+// See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html.
+var (
+ osreldateOnce sync.Once
+ osreldate uint32
+)
+
+// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h
+const _ino64First = 1200031
+
+func supportsABI(ver uint32) bool {
+ osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") })
+ return osreldate >= ver
+}
type SockaddrDatalink struct {
Len uint8
@@ -113,17 +140,39 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
}
func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
- var _p0 unsafe.Pointer
- var bufsize uintptr
+ var (
+ _p0 unsafe.Pointer
+ bufsize uintptr
+ oldBuf []statfs_freebsd11_t
+ needsConvert bool
+ )
+
if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ if supportsABI(_ino64First) {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ } else {
+ n := len(buf)
+ oldBuf = make([]statfs_freebsd11_t, n)
+ _p0 = unsafe.Pointer(&oldBuf[0])
+ bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n)
+ needsConvert = true
+ }
}
- r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ var sysno uintptr = SYS_GETFSSTAT
+ if supportsABI(_ino64First) {
+ sysno = _SYS_GETFSSTAT_FREEBSD12
+ }
+ r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags))
n = int(r0)
if e1 != 0 {
err = e1
}
+ if e1 == 0 && needsConvert {
+ for i := range oldBuf {
+ buf[i].convertFrom(&oldBuf[i])
+ }
+ }
return
}
@@ -132,6 +181,204 @@ func setattrlistTimes(path string, times []Timespec) error {
return ENOSYS
}
+func Stat(path string, st *Stat_t) (err error) {
+ var oldStat stat_freebsd11_t
+ if supportsABI(_ino64First) {
+ return fstatat_freebsd12(_AT_FDCWD, path, st, 0)
+ }
+ err = stat(path, &oldStat)
+ if err != nil {
+ return err
+ }
+
+ st.convertFrom(&oldStat)
+ return nil
+}
+
+func Lstat(path string, st *Stat_t) (err error) {
+ var oldStat stat_freebsd11_t
+ if supportsABI(_ino64First) {
+ return fstatat_freebsd12(_AT_FDCWD, path, st, _AT_SYMLINK_NOFOLLOW)
+ }
+ err = lstat(path, &oldStat)
+ if err != nil {
+ return err
+ }
+
+ st.convertFrom(&oldStat)
+ return nil
+}
+
+func Fstat(fd int, st *Stat_t) (err error) {
+ var oldStat stat_freebsd11_t
+ if supportsABI(_ino64First) {
+ return fstat_freebsd12(fd, st)
+ }
+ err = fstat(fd, &oldStat)
+ if err != nil {
+ return err
+ }
+
+ st.convertFrom(&oldStat)
+ return nil
+}
+
+func Statfs(path string, stat *Statfs_t) (err error) {
+ var oldStatfs statfs_freebsd11_t
+ if supportsABI(_ino64First) {
+ return statfs_freebsd12(path, stat)
+ }
+ err = statfs(path, &oldStatfs)
+ if err != nil {
+ return err
+ }
+
+ stat.convertFrom(&oldStatfs)
+ return nil
+}
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+ var oldStatfs statfs_freebsd11_t
+ if supportsABI(_ino64First) {
+ return fstatfs_freebsd12(fd, stat)
+ }
+ err = fstatfs(fd, &oldStatfs)
+ if err != nil {
+ return err
+ }
+
+ stat.convertFrom(&oldStatfs)
+ return nil
+}
+
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ if supportsABI(_ino64First) {
+ return getdirentries_freebsd12(fd, buf, basep)
+ }
+
+ // The old syscall's dirent entries are smaller than the new ones. Use 1/4 of the
+ // original buffer size, rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c).
+ oldBufLen := roundup(len(buf)/4, _dirblksiz)
+ oldBuf := make([]byte, oldBufLen)
+ n, err = getdirentries(fd, oldBuf, basep)
+ if err == nil && n > 0 {
+ n = convertFromDirents11(oldBuf[:n], buf)
+ }
+ return
+}
+
+func Mknod(path string, mode uint32, dev uint64) (err error) {
+ var oldDev int
+ if supportsABI(_ino64First) {
+ return mknodat_freebsd12(_AT_FDCWD, path, mode, dev)
+ }
+ oldDev = int(dev)
+ return mknod(path, mode, oldDev)
+}
+
+// roundup rounds x up to the smallest multiple of y that is greater than or equal to x.
+//
+// from /usr/include/sys/param.h Macros for counting and rounding.
+// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+func roundup(x, y int) int {
+ return ((x + y - 1) / y) * y
+}
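+
+// Worked example for the Getdirentries sizing above: a 3000-byte caller
+// buffer becomes roundup(3000/4, 1024) == roundup(750, 1024) == 1024 bytes
+// for the old syscall (assuming DIRBLKSIZ has its usual value of 0x400).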
+
+func (s *Stat_t) convertFrom(old *stat_freebsd11_t) {
+ *s = Stat_t{
+ Dev: uint64(old.Dev),
+ Ino: uint64(old.Ino),
+ Nlink: uint64(old.Nlink),
+ Mode: old.Mode,
+ Uid: old.Uid,
+ Gid: old.Gid,
+ Rdev: uint64(old.Rdev),
+ Atim: old.Atim,
+ Mtim: old.Mtim,
+ Ctim: old.Ctim,
+ Birthtim: old.Birthtim,
+ Size: old.Size,
+ Blocks: old.Blocks,
+ Blksize: old.Blksize,
+ Flags: old.Flags,
+ Gen: uint64(old.Gen),
+ }
+}
+
+func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) {
+ *s = Statfs_t{
+ Version: _statfsVersion,
+ Type: old.Type,
+ Flags: old.Flags,
+ Bsize: old.Bsize,
+ Iosize: old.Iosize,
+ Blocks: old.Blocks,
+ Bfree: old.Bfree,
+ Bavail: old.Bavail,
+ Files: old.Files,
+ Ffree: old.Ffree,
+ Syncwrites: old.Syncwrites,
+ Asyncwrites: old.Asyncwrites,
+ Syncreads: old.Syncreads,
+ Asyncreads: old.Asyncreads,
+ // Spare
+ Namemax: old.Namemax,
+ Owner: old.Owner,
+ Fsid: old.Fsid,
+ // Charspare
+ // Fstypename
+ // Mntfromname
+ // Mntonname
+ }
+
+ sl := old.Fstypename[:]
+ n := clen(*(*[]byte)(unsafe.Pointer(&sl)))
+ copy(s.Fstypename[:], old.Fstypename[:n])
+
+ sl = old.Mntfromname[:]
+ n = clen(*(*[]byte)(unsafe.Pointer(&sl)))
+ copy(s.Mntfromname[:], old.Mntfromname[:n])
+
+ sl = old.Mntonname[:]
+ n = clen(*(*[]byte)(unsafe.Pointer(&sl)))
+ copy(s.Mntonname[:], old.Mntonname[:n])
+}
+
+func convertFromDirents11(old []byte, buf []byte) int {
+ oldFixedSize := int(unsafe.Offsetof((*dirent_freebsd11)(nil).Name))
+ fixedSize := int(unsafe.Offsetof((*Dirent)(nil).Name))
+ srcPos := 0
+ dstPos := 0
+ for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) {
+ srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos]))
+ dstDirent := (*Dirent)(unsafe.Pointer(&buf[dstPos]))
+
+ reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8)
+ if dstPos+reclen >= len(buf) {
+ break
+ }
+
+ dstDirent.Fileno = uint64(srcDirent.Fileno)
+ dstDirent.Off = 0
+ dstDirent.Reclen = uint16(reclen)
+ dstDirent.Type = srcDirent.Type
+ dstDirent.Pad0 = 0
+ dstDirent.Namlen = uint16(srcDirent.Namlen)
+ dstDirent.Pad1 = 0
+
+ copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen])
+ padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen]
+ for i := range padding {
+ padding[i] = 0
+ }
+
+ dstPos += int(dstDirent.Reclen)
+ srcPos += int(srcDirent.Reclen)
+ }
+
+ return dstPos
+}
+
/*
* Exposed directly
*/
@@ -151,11 +398,15 @@ func setattrlistTimes(path string, times []Timespec) error {
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Flock(fd int, how int) (err error)
//sys Fpathconf(fd int, name int) (val int, err error)
-//sys Fstat(fd int, stat *Stat_t) (err error)
-//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys fstat(fd int, stat *stat_freebsd11_t) (err error)
+//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) = _SYS_FSTAT_FREEBSD12
+//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) = _SYS_FSTATAT_FREEBSD12
+//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error)
+//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) = _SYS_FSTATFS_FREEBSD12
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
-//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) = _SYS_GETDIRENTRIES_FREEBSD12
//sys Getdtablesize() (size int)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (uid int)
@@ -176,10 +427,11 @@ func setattrlistTimes(path string, times []Timespec) error {
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
//sys Listen(s int, backlog int) (err error)
-//sys Lstat(path string, stat *Stat_t) (err error)
+//sys lstat(path string, stat *stat_freebsd11_t) (err error)
//sys Mkdir(path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
-//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys mknod(path string, mode uint32, dev int) (err error)
+//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) = _SYS_MKNODAT_FREEBSD12
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error)
@@ -204,8 +456,9 @@ func setattrlistTimes(path string, times []Timespec) error {
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tp *Timeval) (err error)
//sysnb Setuid(uid int) (err error)
-//sys Stat(path string, stat *Stat_t) (err error)
-//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys stat(path string, stat *stat_freebsd11_t) (err error)
+//sys statfs(path string, stat *statfs_freebsd11_t) (err error)
+//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) = _SYS_STATFS_FREEBSD12
//sys Symlink(path string, link string) (err error)
//sys Sync() (err error)
//sys Truncate(path string, length int64) (err error)
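
The shape of this file's dual-ABI support, reduced to a sketch: probe the kernel release once, then dispatch between the FreeBSD 12 ino64 syscalls and the compat-11 path. The names below are stand-ins for illustration, not the real package internals.

package main

import (
	"fmt"
	"sync"
)

var (
	once    sync.Once
	version uint32
)

// readVersion stands in for SysctlUint32("kern.osreldate"); the value is made up.
func readVersion() uint32 { return 1200031 }

// supportsABI reports whether the running kernel is at least version ver,
// probing it exactly once.
func supportsABI(ver uint32) bool {
	once.Do(func() { version = readVersion() })
	return version >= ver
}

func main() {
	const ino64First = 1200031 // INO64_FIRST
	if supportsABI(ino64First) {
		fmt.Println("use the FreeBSD 12 ino64 syscalls directly")
	} else {
		fmt.Println("call the FreeBSD 11 variants and convert the structs")
	}
}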
diff --git a/src/syscall/syscall_unix.go b/src/syscall/syscall_unix.go
index c9c0f62dd2..4336851554 100644
--- a/src/syscall/syscall_unix.go
+++ b/src/syscall/syscall_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package syscall
diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go
index 528ef4f26d..de05840386 100644
--- a/src/syscall/syscall_windows.go
+++ b/src/syscall/syscall_windows.go
@@ -123,14 +123,14 @@ func compileCallback(fn interface{}, cleanstack bool) uintptr
// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
// This is useful when interoperating with Windows code requiring callbacks.
-// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
func NewCallback(fn interface{}) uintptr {
return compileCallback(fn, true)
}
// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
// This is useful when interoperating with Windows code requiring callbacks.
-// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
func NewCallbackCDecl(fn interface{}) uintptr {
return compileCallback(fn, false)
}
diff --git a/src/syscall/timestruct.go b/src/syscall/timestruct.go
index 84a00a77d8..d17811c121 100644
--- a/src/syscall/timestruct.go
+++ b/src/syscall/timestruct.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package syscall
diff --git a/src/syscall/types_aix.go b/src/syscall/types_aix.go
new file mode 100644
index 0000000000..f9f05af667
--- /dev/null
+++ b/src/syscall/types_aix.go
@@ -0,0 +1,172 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package syscall
+
+/*
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+
+#include
+#include
+#include
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics; for internal use.
+
+const (
+ sizeofPtr = C.sizeofPtr
+ sizeofShort = C.sizeof_short
+ sizeofInt = C.sizeof_int
+ sizeofLong = C.sizeof_long
+ sizeofLongLong = C.sizeof_longlong
+ PathMax = C.PATH_MAX
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timeval32 C.struct_timeval32
+
+type Timezone C.struct_timezone
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type Pid_t C.pid_t
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Flock_t C.struct_flock
+
+type Stat_t C.struct_stat
+
+type Statfs_t C.struct_statfs
+
+type Fsid64_t C.fsid64_t
+
+type StTimespec_t C.st_timespec_t
+
+type Dirent C.struct_dirent
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Cmsghdr C.struct_cmsghdr
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Linger C.struct_linger
+
+type Msghdr C.struct_msghdr
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+)
+
+type IfMsgHdr C.struct_if_msghdr
+
+// Misc
+
+type Utsname C.struct_utsname
+
+const (
+ _AT_FDCWD = C.AT_FDCWD
+ _AT_REMOVEDIR = C.AT_REMOVEDIR
+ _AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
diff --git a/src/syscall/types_freebsd.go b/src/syscall/types_freebsd.go
index 066a4acbd7..f686021121 100644
--- a/src/syscall/types_freebsd.go
+++ b/src/syscall/types_freebsd.go
@@ -14,7 +14,11 @@ Input to cgo -godefs. See also mkerrors.sh and mkall.sh
package syscall
/*
-#define KERNEL
+#define _WANT_FREEBSD11_STAT 1
+#define _WANT_FREEBSD11_STATFS 1
+#define _WANT_FREEBSD11_DIRENT 1
+#define _WANT_FREEBSD11_KEVENT 1
+
#include
#include
#include
@@ -60,50 +64,6 @@ struct sockaddr_any {
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
-// This structure is a duplicate of stat on FreeBSD 8-STABLE.
-// See /usr/include/sys/stat.h.
-struct stat8 {
-#undef st_atimespec st_atim
-#undef st_mtimespec st_mtim
-#undef st_ctimespec st_ctim
-#undef st_birthtimespec st_birthtim
- __dev_t st_dev;
- ino_t st_ino;
- mode_t st_mode;
- nlink_t st_nlink;
- uid_t st_uid;
- gid_t st_gid;
- __dev_t st_rdev;
-#if __BSD_VISIBLE
- struct timespec st_atimespec;
- struct timespec st_mtimespec;
- struct timespec st_ctimespec;
-#else
- time_t st_atime;
- long __st_atimensec;
- time_t st_mtime;
- long __st_mtimensec;
- time_t st_ctime;
- long __st_ctimensec;
-#endif
- off_t st_size;
- blkcnt_t st_blocks;
- blksize_t st_blksize;
- fflags_t st_flags;
- __uint32_t st_gen;
- __int32_t st_lspare;
-#if __BSD_VISIBLE
- struct timespec st_birthtimespec;
- unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec));
- unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec));
-#else
- time_t st_birthtime;
- long st_birthtimensec;
- unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec));
- unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec));
-#endif
-};
-
// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
// See /usr/include/net/if.h.
struct if_data8 {
@@ -130,7 +90,10 @@ struct if_data8 {
u_long ifi_iqdrops;
u_long ifi_noproto;
u_long ifi_hwassist;
+// FIXME: these are now unions, so maybe need to change definitions?
+#undef ifi_epoch
time_t ifi_epoch;
+#undef ifi_lastchange
struct timeval ifi_lastchange;
};
@@ -202,14 +165,25 @@ const ( // Directory mode bits
S_IRWXO = C.S_IRWXO
)
-type Stat_t C.struct_stat8
+const (
+ _statfsVersion = C.STATFS_VERSION
+ _dirblksiz = C.DIRBLKSIZ
+)
+
+type Stat_t C.struct_stat
+
+type stat_freebsd11_t C.struct_freebsd11_stat
type Statfs_t C.struct_statfs
+type statfs_freebsd11_t C.struct_freebsd11_statfs
+
type Flock_t C.struct_flock
type Dirent C.struct_dirent
+type dirent_freebsd11 C.struct_freebsd11_dirent
+
type Fsid C.struct_fsid
// File system limits
@@ -281,7 +255,7 @@ const (
// Events (kqueue, kevent)
-type Kevent_t C.struct_kevent
+type Kevent_t C.struct_kevent_freebsd11
// Select
@@ -348,7 +322,9 @@ type BpfZbufHeader C.struct_bpf_zbuf_header
// Misc
const (
- _AT_FDCWD = C.AT_FDCWD
+ _AT_FDCWD = C.AT_FDCWD
+ _AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ _AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// Terminal handling
diff --git a/src/syscall/zerrors_aix_ppc64.go b/src/syscall/zerrors_aix_ppc64.go
new file mode 100644
index 0000000000..60130099a7
--- /dev/null
+++ b/src/syscall/zerrors_aix_ppc64.go
@@ -0,0 +1,1248 @@
+// mkerrors.sh -maix64
+// Code generated by the command above; DO NOT EDIT.
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -maix64 _const.go
+
+package syscall
+
+const (
+ AF_APPLETALK = 0x10
+ AF_BYPASS = 0x19
+ AF_CCITT = 0xa
+ AF_CHAOS = 0x5
+ AF_DATAKIT = 0x9
+ AF_DECnet = 0xc
+ AF_DLI = 0xd
+ AF_ECMA = 0x8
+ AF_HYLINK = 0xf
+ AF_IMPLINK = 0x3
+ AF_INET = 0x2
+ AF_INET6 = 0x18
+ AF_INTF = 0x14
+ AF_ISO = 0x7
+ AF_LAT = 0xe
+ AF_LINK = 0x12
+ AF_MAX = 0x1e
+ AF_NDD = 0x17
+ AF_NETWARE = 0x16
+ AF_NS = 0x6
+ AF_OSI = 0x7
+ AF_PUP = 0x4
+ AF_RIF = 0x15
+ AF_ROUTE = 0x11
+ AF_SNA = 0xb
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ ARPHRD_802_3 = 0x6
+ ARPHRD_802_5 = 0x6
+ ARPHRD_ETHER = 0x1
+ ARPHRD_FDDI = 0x1
+ B0 = 0x0
+ B110 = 0x3
+ B1200 = 0x9
+ B134 = 0x4
+ B150 = 0x5
+ B1800 = 0xa
+ B19200 = 0xe
+ B200 = 0x6
+ B2400 = 0xb
+ B300 = 0x7
+ B38400 = 0xf
+ B4800 = 0xc
+ B50 = 0x1
+ B600 = 0x8
+ B75 = 0x2
+ B9600 = 0xd
+ CFLUSH = 0xf
+ CSIOCGIFCONF = -0x3fef96dc
+ CSTART = '\021'
+ CSTOP = '\023'
+ CSUSP = 0x1a
+ ECHO = 0x8
+ ECH_ICMPID = 0x2
+ ETHERNET_CSMACD = 0x6
+ EVENP = 0x80
+ EXCONTINUE = 0x0
+ EXDLOK = 0x3
+ EXIO = 0x2
+ EXPGIO = 0x0
+ EXRESUME = 0x2
+ EXRETURN = 0x1
+ EXSIG = 0x4
+ EXTA = 0xe
+ EXTB = 0xf
+ EXTRAP = 0x1
+ EYEC_RTENTRYA = 0x257274656e747241
+ EYEC_RTENTRYF = 0x257274656e747246
+ E_ACC = 0x0
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0xfffe
+ FLUSHBAND = 0x40
+ FLUSHLOW = 0x8
+ FLUSHO = 0x100000
+ FLUSHR = 0x1
+ FLUSHRW = 0x3
+ FLUSHW = 0x2
+ F_CLOSEM = 0xa
+ F_DUP2FD = 0xe
+ F_DUPFD = 0x0
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLK = 0xb
+ F_GETLK64 = 0xb
+ F_GETOWN = 0x8
+ F_LOCK = 0x1
+ F_OK = 0x0
+ F_RDLCK = 0x1
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLK = 0xc
+ F_SETLK64 = 0xc
+ F_SETLKW = 0xd
+ F_SETLKW64 = 0xd
+ F_SETOWN = 0x9
+ F_TEST = 0x3
+ F_TLOCK = 0x2
+ F_TSTLK = 0xf
+ F_ULOCK = 0x0
+ F_UNLCK = 0x3
+ F_WRLCK = 0x2
+ ICMP6_FILTER = 0x26
+ ICMP6_SEC_SEND_DEL = 0x46
+ ICMP6_SEC_SEND_GET = 0x47
+ ICMP6_SEC_SEND_SET = 0x44
+ ICMP6_SEC_SEND_SET_CGA_ADDR = 0x45
+ IFA_FIRSTALIAS = 0x2000
+ IFA_ROUTE = 0x1
+ IFF_64BIT = 0x4000000
+ IFF_ALLCAST = 0x20000
+ IFF_ALLMULTI = 0x200
+ IFF_BPF = 0x8000000
+ IFF_BRIDGE = 0x40000
+ IFF_BROADCAST = 0x2
+ IFF_CANTCHANGE = 0x80c52
+ IFF_CHECKSUM_OFFLOAD = 0x10000000
+ IFF_D1 = 0x8000
+ IFF_D2 = 0x4000
+ IFF_D3 = 0x2000
+ IFF_D4 = 0x1000
+ IFF_DEBUG = 0x4
+ IFF_DEVHEALTH = 0x4000
+ IFF_DO_HW_LOOPBACK = 0x10000
+ IFF_GROUP_ROUTING = 0x2000000
+ IFF_IFBUFMGT = 0x800000
+ IFF_LINK0 = 0x100000
+ IFF_LINK1 = 0x200000
+ IFF_LINK2 = 0x400000
+ IFF_LOOPBACK = 0x8
+ IFF_MULTICAST = 0x80000
+ IFF_NOARP = 0x80
+ IFF_NOECHO = 0x800
+ IFF_NOTRAILERS = 0x20
+ IFF_OACTIVE = 0x400
+ IFF_POINTOPOINT = 0x10
+ IFF_PROMISC = 0x100
+ IFF_PSEG = 0x40000000
+ IFF_RUNNING = 0x40
+ IFF_SIMPLEX = 0x800
+ IFF_SNAP = 0x8000
+ IFF_TCP_DISABLE_CKSUM = 0x20000000
+ IFF_TCP_NOCKSUM = 0x1000000
+ IFF_UP = 0x1
+ IFF_VIPA = 0x80000000
+ IFNAMSIZ = 0x10
+ IFO_FLUSH = 0x1
+ IFT_1822 = 0x2
+ IFT_AAL5 = 0x31
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ATM = 0x25
+ IFT_CEPT = 0x13
+ IFT_CLUSTER = 0x3e
+ IFT_DS3 = 0x1e
+ IFT_EON = 0x19
+ IFT_ETHER = 0x6
+ IFT_FCS = 0x3a
+ IFT_FDDI = 0xf
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_GIFTUNNEL = 0x3c
+ IFT_HDH1822 = 0x3
+ IFT_HF = 0x3d
+ IFT_HIPPI = 0x2f
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IB = 0xc7
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88026 = 0xa
+ IFT_LAPB = 0x10
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_NSIP = 0x1b
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PPP = 0x17
+ IFT_PROPMUX = 0x36
+ IFT_PROPVIRTUAL = 0x35
+ IFT_PTPSERIAL = 0x16
+ IFT_RS232 = 0x21
+ IFT_SDLC = 0x11
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SN = 0x38
+ IFT_SONET = 0x27
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SP = 0x39
+ IFT_STARLAN = 0xb
+ IFT_T1 = 0x12
+ IFT_TUNNEL = 0x3b
+ IFT_ULTRA = 0x1d
+ IFT_V35 = 0x2d
+ IFT_VIPA = 0x37
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLASSD_HOST = 0xfffffff
+ IN_CLASSD_NET = 0xf0000000
+ IN_CLASSD_NSHIFT = 0x1c
+ IN_LOOPBACKNET = 0x7f
+ IN_USE = 0x1
+ IPPROTO_AH = 0x33
+ IPPROTO_BIP = 0x53
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_EON = 0x50
+ IPPROTO_ESP = 0x32
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GGP = 0x3
+ IPPROTO_GIF = 0x8c
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_LOCAL = 0x3f
+ IPPROTO_MAX = 0x100
+ IPPROTO_MH = 0x87
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PUP = 0xc
+ IPPROTO_QOS = 0x2d
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPV6_ADDRFORM = 0x16
+ IPV6_ADDR_PREFERENCES = 0x4a
+ IPV6_ADD_MEMBERSHIP = 0xc
+ IPV6_AIXRAWSOCKET = 0x39
+ IPV6_CHECKSUM = 0x27
+ IPV6_DONTFRAG = 0x2d
+ IPV6_DROP_MEMBERSHIP = 0xd
+ IPV6_DSTOPTS = 0x36
+ IPV6_FLOWINFO_FLOWLABEL = 0xffffff
+ IPV6_FLOWINFO_PRIFLOW = 0xfffffff
+ IPV6_FLOWINFO_PRIORITY = 0xf000000
+ IPV6_FLOWINFO_SRFLAG = 0x10000000
+ IPV6_FLOWINFO_VERSION = 0xf0000000
+ IPV6_HOPLIMIT = 0x28
+ IPV6_HOPOPTS = 0x34
+ IPV6_JOIN_GROUP = 0xc
+ IPV6_LEAVE_GROUP = 0xd
+ IPV6_MIPDSTOPTS = 0x36
+ IPV6_MULTICAST_HOPS = 0xa
+ IPV6_MULTICAST_IF = 0x9
+ IPV6_MULTICAST_LOOP = 0xb
+ IPV6_NEXTHOP = 0x30
+ IPV6_NOPROBE = 0x1c
+ IPV6_PATHMTU = 0x2e
+ IPV6_PKTINFO = 0x21
+ IPV6_PKTOPTIONS = 0x24
+ IPV6_PRIORITY_10 = 0xa000000
+ IPV6_PRIORITY_11 = 0xb000000
+ IPV6_PRIORITY_12 = 0xc000000
+ IPV6_PRIORITY_13 = 0xd000000
+ IPV6_PRIORITY_14 = 0xe000000
+ IPV6_PRIORITY_15 = 0xf000000
+ IPV6_PRIORITY_8 = 0x8000000
+ IPV6_PRIORITY_9 = 0x9000000
+ IPV6_PRIORITY_BULK = 0x4000000
+ IPV6_PRIORITY_CONTROL = 0x7000000
+ IPV6_PRIORITY_FILLER = 0x1000000
+ IPV6_PRIORITY_INTERACTIVE = 0x6000000
+ IPV6_PRIORITY_RESERVED1 = 0x3000000
+ IPV6_PRIORITY_RESERVED2 = 0x5000000
+ IPV6_PRIORITY_UNATTENDED = 0x2000000
+ IPV6_PRIORITY_UNCHARACTERIZED = 0x0
+ IPV6_RECVDSTOPTS = 0x38
+ IPV6_RECVHOPLIMIT = 0x29
+ IPV6_RECVHOPOPTS = 0x35
+ IPV6_RECVHOPS = 0x22
+ IPV6_RECVIF = 0x1e
+ IPV6_RECVPATHMTU = 0x2f
+ IPV6_RECVPKTINFO = 0x23
+ IPV6_RECVRTHDR = 0x33
+ IPV6_RECVSRCRT = 0x1d
+ IPV6_RECVTCLASS = 0x2a
+ IPV6_RTHDR = 0x32
+ IPV6_RTHDRDSTOPTS = 0x37
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_RTHDR_TYPE_2 = 0x2
+ IPV6_SENDIF = 0x1f
+ IPV6_SRFLAG_LOOSE = 0x0
+ IPV6_SRFLAG_STRICT = 0x10000000
+ IPV6_TCLASS = 0x2b
+ IPV6_TOKEN_LENGTH = 0x40
+ IPV6_UNICAST_HOPS = 0x4
+ IPV6_USE_MIN_MTU = 0x2c
+ IPV6_V6ONLY = 0x25
+ IPV6_VERSION = 0x60000000
+ IP_ADDRFORM = 0x16
+ IP_ADD_MEMBERSHIP = 0xc
+ IP_ADD_SOURCE_MEMBERSHIP = 0x3c
+ IP_BLOCK_SOURCE = 0x3a
+ IP_BROADCAST_IF = 0x10
+ IP_CACHE_LINE_SIZE = 0x80
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DHCPMODE = 0x11
+ IP_DONTFRAG = 0x19
+ IP_DROP_MEMBERSHIP = 0xd
+ IP_DROP_SOURCE_MEMBERSHIP = 0x3d
+ IP_FINDPMTU = 0x1a
+ IP_HDRINCL = 0x2
+ IP_INC_MEMBERSHIPS = 0x14
+ IP_INIT_MEMBERSHIP = 0x14
+ IP_MAXPACKET = 0xffff
+ IP_MF = 0x2000
+ IP_MSS = 0x240
+ IP_MULTICAST_HOPS = 0xa
+ IP_MULTICAST_IF = 0x9
+ IP_MULTICAST_LOOP = 0xb
+ IP_MULTICAST_TTL = 0xa
+ IP_OPT = 0x1b
+ IP_OPTIONS = 0x1
+ IP_PMTUAGE = 0x1b
+ IP_RECVDSTADDR = 0x7
+ IP_RECVIF = 0x14
+ IP_RECVIFINFO = 0xf
+ IP_RECVINTERFACE = 0x20
+ IP_RECVMACHDR = 0xe
+ IP_RECVOPTS = 0x5
+ IP_RECVRETOPTS = 0x6
+ IP_RECVTTL = 0x22
+ IP_RETOPTS = 0x8
+ IP_SOURCE_FILTER = 0x48
+ IP_TOS = 0x3
+ IP_TTL = 0x4
+ IP_UNBLOCK_SOURCE = 0x3b
+ IP_UNICAST_HOPS = 0x4
+ I_FLUSH = 0x20005305
+ LNOFLSH = 0x8000
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DONTNEED = 0x4
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_SEQUENTIAL = 0x2
+ MADV_SPACEAVAIL = 0x5
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x10
+ MAP_ANONYMOUS = 0x10
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x100
+ MAP_PRIVATE = 0x2
+ MAP_SHARED = 0x1
+ MAP_TYPE = 0xf0
+ MAP_VARIABLE = 0x0
+ MCL_CURRENT = 0x100
+ MCL_FUTURE = 0x200
+ MSG_ANY = 0x4
+ MSG_ARGEXT = 0x400
+ MSG_BAND = 0x2
+ MSG_COMPAT = 0x8000
+ MSG_CTRUNC = 0x20
+ MSG_DONTROUTE = 0x4
+ MSG_EOR = 0x8
+ MSG_HIPRI = 0x1
+ MSG_MAXIOVLEN = 0x10
+ MSG_MPEG2 = 0x80
+ MSG_NONBLOCK = 0x4000
+ MSG_NOSIGNAL = 0x100
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_TRUNC = 0x10
+ MSG_WAITALL = 0x40
+ MSG_WAITFORONE = 0x200
+ MS_ASYNC = 0x10
+ MS_EINTR = 0x80
+ MS_INVALIDATE = 0x40
+ MS_PER_SEC = 0x3e8
+ MS_SYNC = 0x20
+ NOFLUSH = 0x80000000
+ O_ACCMODE = 0x23
+ O_APPEND = 0x8
+ O_CIO = 0x80
+ O_CIOR = 0x800000000
+ O_CLOEXEC = 0x800000
+ O_CREAT = 0x100
+ O_DEFER = 0x2000
+ O_DELAY = 0x4000
+ O_DIRECT = 0x8000000
+ O_DIRECTORY = 0x80000
+ O_DSYNC = 0x400000
+ O_EFSOFF = 0x400000000
+ O_EFSON = 0x200000000
+ O_EXCL = 0x400
+ O_EXEC = 0x20
+ O_LARGEFILE = 0x4000000
+ O_NDELAY = 0x8000
+ O_NOCACHE = 0x100000
+ O_NOCTTY = 0x800
+ O_NOFOLLOW = 0x1000000
+ O_NONBLOCK = 0x4
+ O_NONE = 0x3
+ O_NSHARE = 0x10000
+ O_RAW = 0x100000000
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSHARE = 0x1000
+ O_RSYNC = 0x200000
+ O_SEARCH = 0x20
+ O_SNAPSHOT = 0x40
+ O_SYNC = 0x10
+ O_TRUNC = 0x200
+ O_TTY_INIT = 0x0
+ O_WRONLY = 0x1
+ PENDIN = 0x20000000
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PROT_EXEC = 0x4
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ PR_64BIT = 0x20
+ PR_ADDR = 0x2
+ PR_ARGEXT = 0x400
+ PR_ATOMIC = 0x1
+ PR_CONNREQUIRED = 0x4
+ PR_FASTHZ = 0x5
+ PR_INP = 0x40
+ PR_INTRLEVEL = 0x8000
+ PR_MLS = 0x100
+ PR_MLS_1_LABEL = 0x200
+ PR_NOEOR = 0x4000
+ PR_RIGHTS = 0x10
+ PR_SLOWHZ = 0x2
+ PR_WANTRCVD = 0x8
+ PT_ATTACH = 0x1e
+ PT_CLEAR = 0x26
+ PT_COMMAND_MAX = 0x45
+ PT_CONTINUE = 0x7
+ PT_DETACH = 0x1f
+ PT_GET_UKEY = 0x40
+ PT_KILL = 0x8
+ PT_LDINFO = 0x22
+ PT_LDXINFO = 0x27
+ PT_MULTI = 0x23
+ PT_NEXT = 0x24
+ PT_QUERY = 0x28
+ PT_READ_BLOCK = 0x11
+ PT_READ_D = 0x2
+ PT_READ_FPR = 0xc
+ PT_READ_GPR = 0xb
+ PT_READ_I = 0x1
+ PT_REATT = 0x21
+ PT_REGSET = 0x20
+ PT_SET = 0x25
+ PT_STEP = 0x9
+ PT_TRACE_ME = 0x0
+ PT_WATCH = 0x29
+ PT_WRITE_BLOCK = 0x13
+ PT_WRITE_D = 0x5
+ PT_WRITE_FPR = 0xf
+ PT_WRITE_GPR = 0xe
+ PT_WRITE_I = 0x4
+ RLIMIT_AS = 0x6
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_NOFILE = 0x7
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = 0x7fffffffffffffff
+ RTAX_AUTHOR = 0x6
+ RTAX_BRD = 0x7
+ RTAX_DST = 0x0
+ RTAX_GATEWAY = 0x1
+ RTAX_GENMASK = 0x3
+ RTAX_IFA = 0x5
+ RTAX_IFP = 0x4
+ RTAX_MAX = 0x8
+ RTAX_NETMASK = 0x2
+ RTA_AUTHOR = 0x40
+ RTA_BRD = 0x80
+ RTA_DOWNSTREAM = 0x100
+ RTA_DST = 0x1
+ RTA_GATEWAY = 0x2
+ RTA_GENMASK = 0x8
+ RTA_IFA = 0x20
+ RTA_IFP = 0x10
+ RTA_NETMASK = 0x4
+ RTF_ACTIVE_DGD = 0x1000000
+ RTF_BCE = 0x80000
+ RTF_BLACKHOLE = 0x1000
+ RTF_BROADCAST = 0x400000
+ RTF_BUL = 0x2000
+ RTF_CLONE = 0x10000
+ RTF_CLONED = 0x20000
+ RTF_CLONING = 0x100
+ RTF_DONE = 0x40
+ RTF_DYNAMIC = 0x10
+ RTF_FREE_IN_PROG = 0x4000000
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_LLINFO = 0x400
+ RTF_LOCAL = 0x200000
+ RTF_MASK = 0x80
+ RTF_MODIFIED = 0x20
+ RTF_MULTICAST = 0x800000
+ RTF_PERMANENT6 = 0x8000000
+ RTF_PINNED = 0x100000
+ RTF_PROTO1 = 0x8000
+ RTF_PROTO2 = 0x4000
+ RTF_PROTO3 = 0x40000
+ RTF_REJECT = 0x8
+ RTF_SMALLMTU = 0x40000
+ RTF_STATIC = 0x800
+ RTF_STOPSRCH = 0x2000000
+ RTF_UNREACHABLE = 0x10000000
+ RTF_UP = 0x1
+ RTF_XRESOLVE = 0x200
+ RTM_ADD = 0x1
+ RTM_CHANGE = 0x3
+ RTM_DELADDR = 0xd
+ RTM_DELETE = 0x2
+ RTM_EXPIRE = 0xf
+ RTM_GET = 0x4
+ RTM_GETNEXT = 0x11
+ RTM_IFINFO = 0xe
+ RTM_LOCK = 0x8
+ RTM_LOSING = 0x5
+ RTM_MISS = 0x7
+ RTM_NEWADDR = 0xc
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ RTM_REDIRECT = 0x6
+ RTM_RESOLVE = 0xb
+ RTM_RTLOST = 0x10
+ RTM_RTTUNIT = 0xf4240
+ RTM_SAMEADDR = 0x12
+ RTM_SET = 0x13
+ RTM_VERSION = 0x2
+ RTM_VERSION_GR = 0x4
+ RTM_VERSION_GR_COMPAT = 0x3
+ RTM_VERSION_POLICY = 0x5
+ RTM_VERSION_POLICY_EXT = 0x6
+ RTM_VERSION_POLICY_PRFN = 0x7
+ RTV_EXPIRE = 0x4
+ RTV_HOPCOUNT = 0x2
+ RTV_MTU = 0x1
+ RTV_RPIPE = 0x8
+ RTV_RTT = 0x40
+ RTV_RTTVAR = 0x80
+ RTV_SPIPE = 0x10
+ RTV_SSTHRESH = 0x20
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ RUSAGE_THREAD = 0x1
+ SCM_RIGHTS = 0x1
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIGQUEUE_MAX = 0x20
+ SIOCADDIFVIPA = 0x20006942
+ SIOCADDMTU = -0x7ffb9690
+ SIOCADDMULTI = -0x7fdf96cf
+ SIOCADDNETID = -0x7fd796a9
+ SIOCADDRT = -0x7fc78df6
+ SIOCAIFADDR = -0x7fbf96e6
+ SIOCATMARK = 0x40047307
+ SIOCDARP = -0x7fb396e0
+ SIOCDELIFVIPA = 0x20006943
+ SIOCDELMTU = -0x7ffb968f
+ SIOCDELMULTI = -0x7fdf96ce
+ SIOCDELPMTU = -0x7fd78ff6
+ SIOCDELRT = -0x7fc78df5
+ SIOCDIFADDR = -0x7fd796e7
+ SIOCDNETOPT = -0x3ffe9680
+ SIOCDX25XLATE = -0x7fd7969b
+ SIOCFIFADDR = -0x7fdf966d
+ SIOCGARP = -0x3fb396da
+ SIOCGETMTUS = 0x2000696f
+ SIOCGETSGCNT = -0x3feb8acc
+ SIOCGETVIFCNT = -0x3feb8acd
+ SIOCGHIWAT = 0x40047301
+ SIOCGIFADDR = -0x3fd796df
+ SIOCGIFADDRS = 0x2000698c
+ SIOCGIFBAUDRATE = -0x3fd79693
+ SIOCGIFBRDADDR = -0x3fd796dd
+ SIOCGIFCONF = -0x3fef96bb
+ SIOCGIFCONFGLOB = -0x3fef9670
+ SIOCGIFDSTADDR = -0x3fd796de
+ SIOCGIFFLAGS = -0x3fd796ef
+ SIOCGIFGIDLIST = 0x20006968
+ SIOCGIFHWADDR = -0x3fab966b
+ SIOCGIFMETRIC = -0x3fd796e9
+ SIOCGIFMTU = -0x3fd796aa
+ SIOCGIFNETMASK = -0x3fd796db
+ SIOCGIFOPTIONS = -0x3fd796d6
+ SIOCGISNO = -0x3fd79695
+ SIOCGLOADF = -0x3ffb967e
+ SIOCGLOWAT = 0x40047303
+ SIOCGNETOPT = -0x3ffe96a5
+ SIOCGNETOPT1 = -0x3fdf967f
+ SIOCGNMTUS = 0x2000696e
+ SIOCGPGRP = 0x40047309
+ SIOCGSIZIFCONF = 0x4004696a
+ SIOCGSRCFILTER = -0x3fe796cb
+ SIOCGTUNEPHASE = -0x3ffb9676
+ SIOCGX25XLATE = -0x3fd7969c
+ SIOCIFATTACH = -0x7fdf9699
+ SIOCIFDETACH = -0x7fdf969a
+ SIOCIFGETPKEY = -0x7fdf969b
+ SIOCIF_ATM_DARP = -0x7fdf9683
+ SIOCIF_ATM_DUMPARP = -0x7fdf9685
+ SIOCIF_ATM_GARP = -0x7fdf9682
+ SIOCIF_ATM_IDLE = -0x7fdf9686
+ SIOCIF_ATM_SARP = -0x7fdf9681
+ SIOCIF_ATM_SNMPARP = -0x7fdf9687
+ SIOCIF_ATM_SVC = -0x7fdf9684
+ SIOCIF_ATM_UBR = -0x7fdf9688
+ SIOCIF_DEVHEALTH = -0x7ffb966c
+ SIOCIF_IB_ARP_INCOMP = -0x7fdf9677
+ SIOCIF_IB_ARP_TIMER = -0x7fdf9678
+ SIOCIF_IB_CLEAR_PINFO = -0x3fdf966f
+ SIOCIF_IB_DEL_ARP = -0x7fdf967f
+ SIOCIF_IB_DEL_PINFO = -0x3fdf9670
+ SIOCIF_IB_DUMP_ARP = -0x7fdf9680
+ SIOCIF_IB_GET_ARP = -0x7fdf967e
+ SIOCIF_IB_GET_INFO = -0x3f879675
+ SIOCIF_IB_GET_STATS = -0x3f879672
+ SIOCIF_IB_NOTIFY_ADDR_REM = -0x3f87966a
+ SIOCIF_IB_RESET_STATS = -0x3f879671
+ SIOCIF_IB_RESIZE_CQ = -0x7fdf9679
+ SIOCIF_IB_SET_ARP = -0x7fdf967d
+ SIOCIF_IB_SET_PKEY = -0x7fdf967c
+ SIOCIF_IB_SET_PORT = -0x7fdf967b
+ SIOCIF_IB_SET_QKEY = -0x7fdf9676
+ SIOCIF_IB_SET_QSIZE = -0x7fdf967a
+ SIOCLISTIFVIPA = 0x20006944
+ SIOCSARP = -0x7fb396e2
+ SIOCSHIWAT = 0xffffffff80047300
+ SIOCSIFADDR = -0x7fd796f4
+ SIOCSIFADDRORI = -0x7fdb9673
+ SIOCSIFBRDADDR = -0x7fd796ed
+ SIOCSIFDSTADDR = -0x7fd796f2
+ SIOCSIFFLAGS = -0x7fd796f0
+ SIOCSIFGIDLIST = 0x20006969
+ SIOCSIFMETRIC = -0x7fd796e8
+ SIOCSIFMTU = -0x7fd796a8
+ SIOCSIFNETDUMP = -0x7fd796e4
+ SIOCSIFNETMASK = -0x7fd796ea
+ SIOCSIFOPTIONS = -0x7fd796d7
+ SIOCSIFSUBCHAN = -0x7fd796e5
+ SIOCSISNO = -0x7fd79694
+ SIOCSLOADF = -0x3ffb967d
+ SIOCSLOWAT = 0xffffffff80047302
+ SIOCSNETOPT = -0x7ffe96a6
+ SIOCSPGRP = 0xffffffff80047308
+ SIOCSX25XLATE = -0x7fd7969d
+ SOCK_CONN_DGRAM = 0x6
+ SOCK_DGRAM = 0x2
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x1
+ SOL_SOCKET = 0xffff
+ SOMAXCONN = 0x400
+ SO_ACCEPTCONN = 0x2
+ SO_AUDIT = 0x8000
+ SO_BROADCAST = 0x20
+ SO_CKSUMRECV = 0x800
+ SO_DEBUG = 0x1
+ SO_DONTROUTE = 0x10
+ SO_ERROR = 0x1007
+ SO_KEEPALIVE = 0x8
+ SO_KERNACCEPT = 0x2000
+ SO_LINGER = 0x80
+ SO_NOMULTIPATH = 0x4000
+ SO_NOREUSEADDR = 0x1000
+ SO_OOBINLINE = 0x100
+ SO_PEERID = 0x1009
+ SO_RCVBUF = 0x1002
+ SO_RCVLOWAT = 0x1004
+ SO_RCVTIMEO = 0x1006
+ SO_REUSEADDR = 0x4
+ SO_REUSEPORT = 0x200
+ SO_SNDBUF = 0x1001
+ SO_SNDLOWAT = 0x1003
+ SO_SNDTIMEO = 0x1005
+ SO_TIMESTAMPNS = 0x100a
+ SO_TYPE = 0x1008
+ SO_USELOOPBACK = 0x40
+ SO_USE_IFBUFS = 0x400
+ S_BANDURG = 0x400
+ S_EMODFMT = 0x3c000000
+ S_ENFMT = 0x400
+ S_ERROR = 0x100
+ S_HANGUP = 0x200
+ S_HIPRI = 0x2
+ S_ICRYPTO = 0x80000
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFJOURNAL = 0x10000
+ S_IFLNK = 0xa000
+ S_IFMPX = 0x2200
+ S_IFMT = 0xf000
+ S_IFPDIR = 0x4000000
+ S_IFPSDIR = 0x8000000
+ S_IFPSSDIR = 0xc000000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IFSYSEA = 0x30000000
+ S_INPUT = 0x1
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_ITCB = 0x1000000
+ S_ITP = 0x800000
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXACL = 0x2000000
+ S_IXATTR = 0x40000
+ S_IXGRP = 0x8
+ S_IXINTERFACE = 0x100000
+ S_IXMOD = 0x40000000
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ S_MSG = 0x8
+ S_OUTPUT = 0x4
+ S_RDBAND = 0x20
+ S_RDNORM = 0x10
+ S_RESERVED1 = 0x20000
+ S_RESERVED2 = 0x200000
+ S_RESERVED3 = 0x400000
+ S_RESERVED4 = 0x80000000
+ S_RESFMT1 = 0x10000000
+ S_RESFMT10 = 0x34000000
+ S_RESFMT11 = 0x38000000
+ S_RESFMT12 = 0x3c000000
+ S_RESFMT2 = 0x14000000
+ S_RESFMT3 = 0x18000000
+ S_RESFMT4 = 0x1c000000
+ S_RESFMT5 = 0x20000000
+ S_RESFMT6 = 0x24000000
+ S_RESFMT7 = 0x28000000
+ S_RESFMT8 = 0x2c000000
+ S_WRBAND = 0x80
+ S_WRNORM = 0x40
+ TCP_24DAYS_WORTH_OF_SLOWTICKS = 0x3f4800
+ TCP_ACLADD = 0x23
+ TCP_ACLBIND = 0x26
+ TCP_ACLCLEAR = 0x22
+ TCP_ACLDEL = 0x24
+ TCP_ACLDENY = 0x8
+ TCP_ACLFLUSH = 0x21
+ TCP_ACLGID = 0x1
+ TCP_ACLLS = 0x25
+ TCP_ACLSUBNET = 0x4
+ TCP_ACLUID = 0x2
+ TCP_CWND_DF = 0x16
+ TCP_CWND_IF = 0x15
+ TCP_DELAY_ACK_FIN = 0x2
+ TCP_DELAY_ACK_SYN = 0x1
+ TCP_FASTNAME = 0x101080a
+ TCP_KEEPCNT = 0x13
+ TCP_KEEPIDLE = 0x11
+ TCP_KEEPINTVL = 0x12
+ TCP_LSPRIV = 0x29
+ TCP_LUID = 0x20
+ TCP_MAXBURST = 0x8
+ TCP_MAXDF = 0x64
+ TCP_MAXIF = 0x64
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAXWINDOWSCALE = 0xe
+ TCP_MAX_SACK = 0x4
+ TCP_MSS = 0x5b4
+ TCP_NODELAY = 0x1
+ TCP_NODELAYACK = 0x14
+ TCP_NOREDUCE_CWND_EXIT_FRXMT = 0x19
+ TCP_NOREDUCE_CWND_IN_FRXMT = 0x18
+ TCP_NOTENTER_SSTART = 0x17
+ TCP_OPT = 0x19
+ TCP_RFC1323 = 0x4
+ TCP_SETPRIV = 0x27
+ TCP_STDURG = 0x10
+ TCP_TIMESTAMP_OPTLEN = 0xc
+ TCP_UNSETPRIV = 0x28
+ TIOCCBRK = 0x2000747a
+ TIOCCDTR = 0x20007478
+ TIOCCONS = 0xffffffff80047462
+ TIOCEXCL = 0x2000740d
+ TIOCFLUSH = 0xffffffff80047410
+ TIOCGETC = 0x40067412
+ TIOCGETD = 0x40047400
+ TIOCGETP = 0x40067408
+ TIOCGLTC = 0x40067474
+ TIOCGPGRP = 0x40047477
+ TIOCGSID = 0x40047448
+ TIOCGSIZE = 0x40087468
+ TIOCGWINSZ = 0x40087468
+ TIOCHPCL = 0x20007402
+ TIOCLBIC = 0xffffffff8004747e
+ TIOCLBIS = 0xffffffff8004747f
+ TIOCLGET = 0x4004747c
+ TIOCLSET = 0xffffffff8004747d
+ TIOCMBIC = 0xffffffff8004746b
+ TIOCMBIS = 0xffffffff8004746c
+ TIOCMGET = 0x4004746a
+ TIOCMIWAIT = 0xffffffff80047464
+ TIOCMODG = 0x40047403
+ TIOCMODS = 0xffffffff80047404
+ TIOCMSET = 0xffffffff8004746d
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x20007471
+ TIOCNXCL = 0x2000740e
+ TIOCOUTQ = 0x40047473
+ TIOCPKT = 0xffffffff80047470
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCREMOTE = 0xffffffff80047469
+ TIOCSBRK = 0x2000747b
+ TIOCSDTR = 0x20007479
+ TIOCSETC = 0xffffffff80067411
+ TIOCSETD = 0xffffffff80047401
+ TIOCSETN = 0xffffffff8006740a
+ TIOCSETP = 0xffffffff80067409
+ TIOCSLTC = 0xffffffff80067475
+ TIOCSPGRP = 0xffffffff80047476
+ TIOCSSIZE = 0xffffffff80087467
+ TIOCSTART = 0x2000746e
+ TIOCSTI = 0xffffffff80017472
+ TIOCSTOP = 0x2000746f
+ TIOCSWINSZ = 0xffffffff80087467
+ TIOCUCNTL = 0xffffffff80047466
+ TOSTOP = 0x10000
+ VTDELAY = 0x2000
+ WPARSTART = 0x1
+ WPARSTOP = 0x2
+ WPARTTYNAME = "Global"
+ _FDATAFLUSH = 0x2000000000
+)
+
+// Errors
+const (
+ E2BIG = Errno(0x7)
+ EACCES = Errno(0xd)
+ EADDRINUSE = Errno(0x43)
+ EADDRNOTAVAIL = Errno(0x44)
+ EAFNOSUPPORT = Errno(0x42)
+ EAGAIN = Errno(0xb)
+ EALREADY = Errno(0x38)
+ EBADF = Errno(0x9)
+ EBADMSG = Errno(0x78)
+ EBUSY = Errno(0x10)
+ ECANCELED = Errno(0x75)
+ ECHILD = Errno(0xa)
+ ECHRNG = Errno(0x25)
+ ECLONEME = Errno(0x52)
+ ECONNABORTED = Errno(0x48)
+ ECONNREFUSED = Errno(0x4f)
+ ECONNRESET = Errno(0x49)
+ ECORRUPT = Errno(0x59)
+ EDEADLK = Errno(0x2d)
+ EDESTADDREQ = Errno(0x3a)
+ EDESTADDRREQ = Errno(0x3a)
+ EDIST = Errno(0x35)
+ EDOM = Errno(0x21)
+ EDQUOT = Errno(0x58)
+ EEXIST = Errno(0x11)
+ EFAULT = Errno(0xe)
+ EFBIG = Errno(0x1b)
+ EFORMAT = Errno(0x30)
+ EHOSTDOWN = Errno(0x50)
+ EHOSTUNREACH = Errno(0x51)
+ EIDRM = Errno(0x24)
+ EILSEQ = Errno(0x74)
+ EINPROGRESS = Errno(0x37)
+ EINTR = Errno(0x4)
+ EINVAL = Errno(0x16)
+ EIO = Errno(0x5)
+ EISCONN = Errno(0x4b)
+ EISDIR = Errno(0x15)
+ EL2HLT = Errno(0x2c)
+ EL2NSYNC = Errno(0x26)
+ EL3HLT = Errno(0x27)
+ EL3RST = Errno(0x28)
+ ELNRNG = Errno(0x29)
+ ELOOP = Errno(0x55)
+ EMEDIA = Errno(0x6e)
+ EMFILE = Errno(0x18)
+ EMLINK = Errno(0x1f)
+ EMSGSIZE = Errno(0x3b)
+ EMULTIHOP = Errno(0x7d)
+ ENAMETOOLONG = Errno(0x56)
+ ENETDOWN = Errno(0x45)
+ ENETRESET = Errno(0x47)
+ ENETUNREACH = Errno(0x46)
+ ENFILE = Errno(0x17)
+ ENOATTR = Errno(0x70)
+ ENOBUFS = Errno(0x4a)
+ ENOCONNECT = Errno(0x32)
+ ENOCSI = Errno(0x2b)
+ ENODATA = Errno(0x7a)
+ ENODEV = Errno(0x13)
+ ENOENT = Errno(0x2)
+ ENOEXEC = Errno(0x8)
+ ENOLCK = Errno(0x31)
+ ENOLINK = Errno(0x7e)
+ ENOMEM = Errno(0xc)
+ ENOMSG = Errno(0x23)
+ ENOPROTOOPT = Errno(0x3d)
+ ENOSPC = Errno(0x1c)
+ ENOSR = Errno(0x76)
+ ENOSTR = Errno(0x7b)
+ ENOSYS = Errno(0x6d)
+ ENOTBLK = Errno(0xf)
+ ENOTCONN = Errno(0x4c)
+ ENOTDIR = Errno(0x14)
+ ENOTEMPTY = Errno(0x11)
+ ENOTREADY = Errno(0x2e)
+ ENOTRECOVERABLE = Errno(0x5e)
+ ENOTRUST = Errno(0x72)
+ ENOTSOCK = Errno(0x39)
+ ENOTSUP = Errno(0x7c)
+ ENOTTY = Errno(0x19)
+ ENXIO = Errno(0x6)
+ EOPNOTSUPP = Errno(0x40)
+ EOVERFLOW = Errno(0x7f)
+ EOWNERDEAD = Errno(0x5f)
+ EPERM = Errno(0x1)
+ EPFNOSUPPORT = Errno(0x41)
+ EPIPE = Errno(0x20)
+ EPROCLIM = Errno(0x53)
+ EPROTO = Errno(0x79)
+ EPROTONOSUPPORT = Errno(0x3e)
+ EPROTOTYPE = Errno(0x3c)
+ ERANGE = Errno(0x22)
+ EREMOTE = Errno(0x5d)
+ ERESTART = Errno(0x52)
+ EROFS = Errno(0x1e)
+ ESAD = Errno(0x71)
+ ESHUTDOWN = Errno(0x4d)
+ ESOCKTNOSUPPORT = Errno(0x3f)
+ ESOFT = Errno(0x6f)
+ ESPIPE = Errno(0x1d)
+ ESRCH = Errno(0x3)
+ ESTALE = Errno(0x34)
+ ESYSERROR = Errno(0x5a)
+ ETIME = Errno(0x77)
+ ETIMEDOUT = Errno(0x4e)
+ ETOOMANYREFS = Errno(0x73)
+ ETXTBSY = Errno(0x1a)
+ EUNATCH = Errno(0x2a)
+ EUSERS = Errno(0x54)
+ EWOULDBLOCK = Errno(0xb)
+ EWRPROTECT = Errno(0x2f)
+ EXDEV = Errno(0x12)
+)
+
+// Signals
+const (
+ SIGABRT = Signal(0x6)
+ SIGAIO = Signal(0x17)
+ SIGALRM = Signal(0xe)
+ SIGALRM1 = Signal(0x26)
+ SIGBUS = Signal(0xa)
+ SIGCAPI = Signal(0x31)
+ SIGCHLD = Signal(0x14)
+ SIGCLD = Signal(0x14)
+ SIGCONT = Signal(0x13)
+ SIGCPUFAIL = Signal(0x3b)
+ SIGDANGER = Signal(0x21)
+ SIGEMT = Signal(0x7)
+ SIGFPE = Signal(0x8)
+ SIGGRANT = Signal(0x3c)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x17)
+ SIGIOINT = Signal(0x10)
+ SIGIOT = Signal(0x6)
+ SIGKAP = Signal(0x3c)
+ SIGKILL = Signal(0x9)
+ SIGLOST = Signal(0x6)
+ SIGMAX = Signal(0xff)
+ SIGMAX32 = Signal(0x3f)
+ SIGMAX64 = Signal(0xff)
+ SIGMIGRATE = Signal(0x23)
+ SIGMSG = Signal(0x1b)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x17)
+ SIGPRE = Signal(0x24)
+ SIGPROF = Signal(0x20)
+ SIGPTY = Signal(0x17)
+ SIGPWR = Signal(0x1d)
+ SIGQUIT = Signal(0x3)
+ SIGRECONFIG = Signal(0x3a)
+ SIGRETRACT = Signal(0x3d)
+ SIGSAK = Signal(0x3f)
+ SIGSEGV = Signal(0xb)
+ SIGSOUND = Signal(0x3e)
+ SIGSTOP = Signal(0x11)
+ SIGSYS = Signal(0xc)
+ SIGSYSERROR = Signal(0x30)
+ SIGTALRM = Signal(0x26)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x12)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGURG = Signal(0x10)
+ SIGUSR1 = Signal(0x1e)
+ SIGUSR2 = Signal(0x1f)
+ SIGVIRT = Signal(0x25)
+ SIGVTALRM = Signal(0x22)
+ SIGWAITING = Signal(0x27)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
+
+// Error table
+var errors = [...]string{
+ 1: "not owner",
+ 2: "no such file or directory",
+ 3: "no such process",
+ 4: "interrupted system call",
+ 5: "I/O error",
+ 6: "no such device or address",
+ 7: "arg list too long",
+ 8: "exec format error",
+ 9: "bad file number",
+ 10: "no child processes",
+ 11: "resource temporarily unavailable",
+ 12: "not enough space",
+ 13: "permission denied",
+ 14: "bad address",
+ 15: "block device required",
+ 16: "device busy",
+ 17: "file exists",
+ 18: "cross-device link",
+ 19: "no such device",
+ 20: "not a directory",
+ 21: "is a directory",
+ 22: "invalid argument",
+ 23: "file table overflow",
+ 24: "too many open files",
+ 25: "not a typewriter",
+ 26: "text file busy",
+ 27: "file too large",
+ 28: "no space left on device",
+ 29: "illegal seek",
+ 30: "read-only file system",
+ 31: "too many links",
+ 32: "broken pipe",
+ 33: "argument out of domain",
+ 34: "result too large",
+ 35: "no message of desired type",
+ 36: "identifier removed",
+ 37: "channel number out of range",
+ 38: "level 2 not synchronized",
+ 39: "level 3 halted",
+ 40: "level 3 reset",
+ 41: "link number out of range",
+ 42: "protocol driver not attached",
+ 43: "no CSI structure available",
+ 44: "level 2 halted",
+ 45: "deadlock condition if locked",
+ 46: "device not ready",
+ 47: "write-protected media",
+ 48: "unformatted or incompatible media",
+ 49: "no locks available",
+ 50: "cannot Establish Connection",
+ 52: "missing file or filesystem",
+ 53: "requests blocked by Administrator",
+ 55: "operation now in progress",
+ 56: "operation already in progress",
+ 57: "socket operation on non-socket",
+ 58: "destination address required",
+ 59: "message too long",
+ 60: "protocol wrong type for socket",
+ 61: "protocol not available",
+ 62: "protocol not supported",
+ 63: "socket type not supported",
+ 64: "operation not supported on socket",
+ 65: "protocol family not supported",
+ 66: "addr family not supported by protocol",
+ 67: "address already in use",
+ 68: "can't assign requested address",
+ 69: "network is down",
+ 70: "network is unreachable",
+ 71: "network dropped connection on reset",
+ 72: "software caused connection abort",
+ 73: "connection reset by peer",
+ 74: "no buffer space available",
+ 75: "socket is already connected",
+ 76: "socket is not connected",
+ 77: "can't send after socket shutdown",
+ 78: "connection timed out",
+ 79: "connection refused",
+ 80: "host is down",
+ 81: "no route to host",
+ 82: "restart the system call",
+ 83: "too many processes",
+ 84: "too many users",
+ 85: "too many levels of symbolic links",
+ 86: "file name too long",
+ 88: "disk quota exceeded",
+ 89: "invalid file system control data detected",
+ 90: "for future use ",
+ 93: "item is not local to host",
+ 94: "state not recoverable ",
+ 95: "previous owner died ",
+ 109: "function not implemented",
+ 110: "media surface error",
+ 111: "I/O completed, but needs relocation",
+ 112: "no attribute found",
+ 113: "security Authentication Denied",
+ 114: "not a Trusted Program",
+ 115: "too many references: can't splice",
+ 116: "invalid wide character",
+ 117: "asynchronous I/O cancelled",
+ 118: "out of STREAMS resources",
+ 119: "system call timed out",
+ 120: "next message has wrong type",
+ 121: "error in protocol",
+ 122: "no message on stream head read q",
+ 123: "fd not associated with a stream",
+ 124: "unsupported attribute value",
+ 125: "multihop is not allowed",
+ 126: "the server link has been severed",
+ 127: "value too large to be stored in data type",
+}
+
+// Signal table
+var signals = [...]string{
+ 1: "hangup",
+ 2: "interrupt",
+ 3: "quit",
+ 4: "illegal instruction",
+ 5: "trace/BPT trap",
+ 6: "IOT/Abort trap",
+ 7: "EMT trap",
+ 8: "floating point exception",
+ 9: "killed",
+ 10: "bus error",
+ 11: "segmentation fault",
+ 12: "bad system call",
+ 13: "broken pipe",
+ 14: "alarm clock",
+ 15: "terminated",
+ 16: "urgent I/O condition",
+ 17: "stopped (signal)",
+ 18: "stopped",
+ 19: "continued",
+ 20: "child exited",
+ 21: "stopped (tty input)",
+ 22: "stopped (tty output)",
+ 23: "I/O possible/complete",
+ 24: "cputime limit exceeded",
+ 25: "filesize limit exceeded",
+ 27: "input device data",
+ 28: "window size changes",
+ 29: "power-failure",
+ 30: "user defined signal 1",
+ 31: "user defined signal 2",
+ 32: "profiling timer expired",
+ 33: "paging space low",
+ 34: "virtual timer expired",
+ 35: "signal 35",
+ 36: "signal 36",
+ 37: "signal 37",
+ 38: "signal 38",
+ 39: "signal 39",
+ 48: "signal 48",
+ 49: "signal 49",
+ 58: "signal 58",
+ 59: "CPU Failure Predicted",
+ 60: "monitor mode granted",
+ 61: "monitor mode retracted",
+ 62: "sound completed",
+ 63: "secure attention",
+ 255: "signal 255",
+}
diff --git a/src/syscall/zsyscall_aix_ppc64.go b/src/syscall/zsyscall_aix_ppc64.go
new file mode 100644
index 0000000000..3ea11f8af3
--- /dev/null
+++ b/src/syscall/zsyscall_aix_ppc64.go
@@ -0,0 +1,1341 @@
+// mksyscall_libc.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
+// Code generated by the command above; DO NOT EDIT.
+
+// +build aix,ppc64
+
+package syscall
+
+import "unsafe"
+
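+// Each go:cgo_import_dynamic directive below tells the linker to resolve
+// the named C symbol from AIX's 64-bit shared libc (libc.a/shr_64.o) at
+// load time; the dynamic loader fills the corresponding libc_xxx variable
+// with the resolved function address.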
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_pipe pipe "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_readlink readlink "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_utimes utimes "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_utimensat utimensat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getgroups getgroups "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getdirent getdirent "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_bind bind "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_connect connect "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getkerninfo getkerninfo "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Listen listen "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_socket socket "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_socketpair socketpair "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getpeername getpeername "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getsockname getsockname "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sendto sendto "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Shutdown shutdown "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_accept accept "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Openat openat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_ptrace64 ptrace64 "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Acct acct "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Chdir chdir "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Chmod chmod "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Chown chown "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Close close "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Dup dup "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Faccessat faccessat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fchdir fchdir "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fchmod fchmod "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fchmodat fchmodat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fchown fchown "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fchownat fchownat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fpathconf fpathconf "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fstat fstat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fstatfs fstatfs "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Ftruncate ftruncate "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Fsync fsync "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getgid getgid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getpid getpid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Geteuid geteuid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getegid getegid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getppid getppid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getrlimit getrlimit "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Getuid getuid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Kill kill "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Lchown lchown "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Link link "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Lstat lstat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Mkdir mkdir "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Mkdirat mkdirat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Mknodat mknodat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Open open "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Pread pread "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Pwrite pwrite "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_read read "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Reboot reboot "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Rename rename "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Renameat renameat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Rmdir rmdir "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Setegid setegid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Seteuid seteuid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Setgid setgid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Setpgid setpgid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Setregid setregid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Setreuid setreuid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Stat stat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Statfs statfs "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Symlink symlink "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Truncate truncate "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Umask umask "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Unlink unlink "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_Uname uname "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_write write "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_mmap mmap "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o"
+
+//go:linkname libc_fcntl libc_fcntl
+//go:linkname libc_dup2 libc_dup2
+//go:linkname libc_pipe libc_pipe
+//go:linkname libc_readlink libc_readlink
+//go:linkname libc_utimes libc_utimes
+//go:linkname libc_utimensat libc_utimensat
+//go:linkname libc_unlinkat libc_unlinkat
+//go:linkname libc_getcwd libc_getcwd
+//go:linkname libc_getgroups libc_getgroups
+//go:linkname libc_setgroups libc_setgroups
+//go:linkname libc_getdirent libc_getdirent
+//go:linkname libc_wait4 libc_wait4
+//go:linkname libc_bind libc_bind
+//go:linkname libc_connect libc_connect
+//go:linkname libc_Getkerninfo libc_Getkerninfo
+//go:linkname libc_getsockopt libc_getsockopt
+//go:linkname libc_Listen libc_Listen
+//go:linkname libc_setsockopt libc_setsockopt
+//go:linkname libc_socket libc_socket
+//go:linkname libc_socketpair libc_socketpair
+//go:linkname libc_getpeername libc_getpeername
+//go:linkname libc_getsockname libc_getsockname
+//go:linkname libc_recvfrom libc_recvfrom
+//go:linkname libc_sendto libc_sendto
+//go:linkname libc_Shutdown libc_Shutdown
+//go:linkname libc_recvmsg libc_recvmsg
+//go:linkname libc_sendmsg libc_sendmsg
+//go:linkname libc_accept libc_accept
+//go:linkname libc_Openat libc_Openat
+//go:linkname libc_ptrace64 libc_ptrace64
+//go:linkname libc_Acct libc_Acct
+//go:linkname libc_Chdir libc_Chdir
+//go:linkname libc_Chmod libc_Chmod
+//go:linkname libc_Chown libc_Chown
+//go:linkname libc_Close libc_Close
+//go:linkname libc_Dup libc_Dup
+//go:linkname libc_Faccessat libc_Faccessat
+//go:linkname libc_Fchdir libc_Fchdir
+//go:linkname libc_Fchmod libc_Fchmod
+//go:linkname libc_Fchmodat libc_Fchmodat
+//go:linkname libc_Fchown libc_Fchown
+//go:linkname libc_Fchownat libc_Fchownat
+//go:linkname libc_Fpathconf libc_Fpathconf
+//go:linkname libc_Fstat libc_Fstat
+//go:linkname libc_Fstatfs libc_Fstatfs
+//go:linkname libc_Ftruncate libc_Ftruncate
+//go:linkname libc_Fsync libc_Fsync
+//go:linkname libc_Getgid libc_Getgid
+//go:linkname libc_Getpid libc_Getpid
+//go:linkname libc_Geteuid libc_Geteuid
+//go:linkname libc_Getegid libc_Getegid
+//go:linkname libc_Getppid libc_Getppid
+//go:linkname libc_Getrlimit libc_Getrlimit
+//go:linkname libc_Getuid libc_Getuid
+//go:linkname libc_Kill libc_Kill
+//go:linkname libc_Lchown libc_Lchown
+//go:linkname libc_Link libc_Link
+//go:linkname libc_Lstat libc_Lstat
+//go:linkname libc_Mkdir libc_Mkdir
+//go:linkname libc_Mkdirat libc_Mkdirat
+//go:linkname libc_Mknodat libc_Mknodat
+//go:linkname libc_Open libc_Open
+//go:linkname libc_Pread libc_Pread
+//go:linkname libc_Pwrite libc_Pwrite
+//go:linkname libc_read libc_read
+//go:linkname libc_Reboot libc_Reboot
+//go:linkname libc_Rename libc_Rename
+//go:linkname libc_Renameat libc_Renameat
+//go:linkname libc_Rmdir libc_Rmdir
+//go:linkname libc_lseek libc_lseek
+//go:linkname libc_Setegid libc_Setegid
+//go:linkname libc_Seteuid libc_Seteuid
+//go:linkname libc_Setgid libc_Setgid
+//go:linkname libc_Setpgid libc_Setpgid
+//go:linkname libc_Setregid libc_Setregid
+//go:linkname libc_Setreuid libc_Setreuid
+//go:linkname libc_Stat libc_Stat
+//go:linkname libc_Statfs libc_Statfs
+//go:linkname libc_Symlink libc_Symlink
+//go:linkname libc_Truncate libc_Truncate
+//go:linkname libc_Umask libc_Umask
+//go:linkname libc_Unlink libc_Unlink
+//go:linkname libc_Uname libc_Uname
+//go:linkname libc_write libc_write
+//go:linkname libc_gettimeofday libc_gettimeofday
+//go:linkname libc_mmap libc_mmap
+//go:linkname libc_munmap libc_munmap
+
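+// libcFunc holds the address of a libc entry point filled in by the
+// dynamic loader; syscall6 and rawSyscall6 take a pointer to one of the
+// variables below and call through it rather than trapping into the
+// kernel directly.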
+type libcFunc uintptr
+
+var (
+ libc_fcntl,
+ libc_dup2,
+ libc_pipe,
+ libc_readlink,
+ libc_utimes,
+ libc_utimensat,
+ libc_unlinkat,
+ libc_getcwd,
+ libc_getgroups,
+ libc_setgroups,
+ libc_getdirent,
+ libc_wait4,
+ libc_bind,
+ libc_connect,
+ libc_Getkerninfo,
+ libc_getsockopt,
+ libc_Listen,
+ libc_setsockopt,
+ libc_socket,
+ libc_socketpair,
+ libc_getpeername,
+ libc_getsockname,
+ libc_recvfrom,
+ libc_sendto,
+ libc_Shutdown,
+ libc_recvmsg,
+ libc_sendmsg,
+ libc_accept,
+ libc_Openat,
+ libc_ptrace64,
+ libc_Acct,
+ libc_Chdir,
+ libc_Chmod,
+ libc_Chown,
+ libc_Close,
+ libc_Dup,
+ libc_Faccessat,
+ libc_Fchdir,
+ libc_Fchmod,
+ libc_Fchmodat,
+ libc_Fchown,
+ libc_Fchownat,
+ libc_Fpathconf,
+ libc_Fstat,
+ libc_Fstatfs,
+ libc_Ftruncate,
+ libc_Fsync,
+ libc_Getgid,
+ libc_Getpid,
+ libc_Geteuid,
+ libc_Getegid,
+ libc_Getppid,
+ libc_Getrlimit,
+ libc_Getuid,
+ libc_Kill,
+ libc_Lchown,
+ libc_Link,
+ libc_Lstat,
+ libc_Mkdir,
+ libc_Mkdirat,
+ libc_Mknodat,
+ libc_Open,
+ libc_Pread,
+ libc_Pwrite,
+ libc_read,
+ libc_Reboot,
+ libc_Rename,
+ libc_Renameat,
+ libc_Rmdir,
+ libc_lseek,
+ libc_Setegid,
+ libc_Seteuid,
+ libc_Setgid,
+ libc_Setpgid,
+ libc_Setregid,
+ libc_Setreuid,
+ libc_Stat,
+ libc_Statfs,
+ libc_Symlink,
+ libc_Truncate,
+ libc_Umask,
+ libc_Unlink,
+ libc_Uname,
+ libc_write,
+ libc_gettimeofday,
+ libc_mmap,
+ libc_munmap libcFunc
+)
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
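+// The wrappers below all follow one pattern: flatten the Go arguments to
+// uintptrs, call through the libc function pointer, and convert a nonzero
+// errno from the call into an error via errnoErr.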
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func dup2(old int, new int) (val int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_dup2)), 2, uintptr(old), uintptr(new), 0, 0, 0, 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]_C_int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_pipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlink(path string, buf []byte, bufSize uint64) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ if len(buf) > 0 {
+ _p1 = &buf[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_readlink)), 4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), uintptr(bufSize), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_utimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_utimensat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_unlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getcwd(buf *byte, size uint64) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getcwd)), 2, uintptr(unsafe.Pointer(buf)), uintptr(size), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
+ r0, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_getgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(ngid int, gid *_Gid_t) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_setgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getdirent(fd int, buf []byte) (n int, err error) {
+ var _p0 *byte
+ if len(buf) > 0 {
+ _p0 = &buf[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getdirent)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_wait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(status)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = Pid_t(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getkerninfo(op int32, where uintptr, size uintptr, arg int64) (i int32, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Getkerninfo)), 4, uintptr(op), uintptr(where), uintptr(size), uintptr(arg), 0, 0)
+ i = int32(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, backlog int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_setsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_getpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 *byte
+ if len(p) > 0 {
+ _p0 = &p[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_recvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 *byte
+ if len(buf) > 0 {
+ _p0 = &buf[0]
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(s int, how int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Shutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_accept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Openat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace64(request int, id int64, addr int64, data int, buff uintptr) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_ptrace64)), 5, uintptr(request), uintptr(id), uintptr(addr), uintptr(data), uintptr(buff), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Acct)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Chdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chmod(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Chmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Chown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Close)), 1, uintptr(fd), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(fd int) (nfd int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Dup)), 1, uintptr(fd), 0, 0, 0, 0, 0)
+ nfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Faccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fpathconf(fd int, name int) (val int, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fstatfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Ftruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getgid)), 0, 0, 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getpid)), 0, 0, 0, 0, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+ r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Geteuid)), 0, 0, 0, 0, 0, 0, 0)
+ euid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Getegid)), 0, 0, 0, 0, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Getppid)), 0, 0, 0, 0, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getuid)), 0, 0, 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, signum Signal) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Kill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Lchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Link(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Link)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Lstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdir(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Mkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Mkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Mknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Open)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 *byte
+ if len(p) > 0 {
+ _p0 = &p[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Pread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 *byte
+ if len(p) > 0 {
+ _p0 = &p[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Pwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 *byte
+ if len(p) > 0 {
+ _p0 = &p[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_read)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Reboot(how int) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Reboot)), 1, uintptr(how), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rename(from string, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Rename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Renameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rmdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Rmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0)
+ newoffset = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setegid(egid int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setegid)), 1, uintptr(egid), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seteuid(euid int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Seteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setgid(gid int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setgid)), 1, uintptr(gid), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Stat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Statfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlink(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Symlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Truncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(newmask int) (oldmask int) {
+ r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Umask)), 1, uintptr(newmask), 0, 0, 0, 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlink(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Unlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+ _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Uname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 *byte
+ if len(p) > 0 {
+ _p0 = &p[0]
+ }
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_write)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func gettimeofday(tv *Timeval, tzp *Timezone) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_gettimeofday)), 2, uintptr(unsafe.Pointer(tv)), uintptr(unsafe.Pointer(tzp)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_mmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
+ ret = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_munmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
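
Note that the AIX wrappers above never issue raw system calls: every function funnels through a libc entry point, using syscall6 for calls that may block and rawSyscall6 for calls that must not. The libc_Xxx variables whose addresses are passed as the first argument are declared near the top of the generated file, outside this excerpt. A minimal sketch of that binding, assuming the same libcFunc scheme the Solaris file further down uses — the directive names and the "libc.a/shr_64.o" member are recalled from the generated header rather than shown in this diff, so treat them as approximate:

//go:cgo_import_dynamic libc_Geteuid geteuid "libc.a/shr_64.o"
//go:linkname libc_Geteuid libc_Geteuid

// libcFunc holds the address of a dynamically resolved libc entry point;
// &libc_Geteuid is what the syscall6 call in Geteuid above passes along.
type libcFunc uintptr

var libc_Geteuid libcFunc
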
diff --git a/src/syscall/zsyscall_freebsd_386.go b/src/syscall/zsyscall_freebsd_386.go
index 451da4d6fe..ba7ea27f8d 100644
--- a/src/syscall/zsyscall_freebsd_386.go
+++ b/src/syscall/zsyscall_freebsd_386.go
@@ -463,7 +463,7 @@ func Fpathconf(fd int, name int) (val int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
+func fstat(fd int, stat *stat_freebsd11_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -473,7 +473,32 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
+func fstat_freebsd12(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(_SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(_SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -483,6 +508,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) {
+ _, _, e1 := Syscall(_SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -503,7 +538,7 @@ func Ftruncate(fd int, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
@@ -520,6 +555,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getdtablesize() (size int) {
r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
size = int(r0)
@@ -721,7 +773,7 @@ func Listen(s int, backlog int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Lstat(path string, stat *Stat_t) (err error) {
+func lstat(path string, stat *stat_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -766,7 +818,7 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Mknod(path string, mode uint32, dev int) (err error) {
+func mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -781,6 +833,21 @@ func Mknod(path string, mode uint32, dev int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(_SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
@@ -1093,7 +1160,7 @@ func Setuid(uid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Stat(path string, stat *Stat_t) (err error) {
+func stat(path string, stat *stat_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -1108,7 +1175,7 @@ func Stat(path string, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Statfs(path string, stat *Statfs_t) (err error) {
+func statfs(path string, stat *statfs_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -1123,6 +1190,21 @@ func Statfs(path string, stat *Statfs_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func statfs_freebsd12(path string, stat *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(_SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
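
FreeBSD 12 widened ino_t, dev_t, and nlink_t to 64 bits, which changed the layouts of Stat_t, Statfs_t, and Dirent and assigned new numbers to the affected system calls. The hunks above therefore demote the old wrappers to unexported names (fstat, lstat, statfs, getdirentries, mknod) over the freebsd11 layouts and add *_freebsd12 wrappers over the new ones. The exported functions choose between the two at run time by checking kern.osreldate. A minimal sketch of that dispatch, which lives in syscall_freebsd.go rather than in this diff — the helper names, the 1200031 cutoff, and the convertFrom method are recalled rather than quoted, so treat them as approximate:

var (
	osreldateOnce sync.Once
	osreldate     uint32
)

// supportsIno64 reports whether the running kernel speaks the 64-bit
// inode ABI introduced in FreeBSD 12.
func supportsIno64() bool {
	osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") })
	return osreldate >= 1200031
}

func Fstat(fd int, st *Stat_t) (err error) {
	if supportsIno64() {
		return fstat_freebsd12(fd, st)
	}
	// Old kernel: call the freebsd11 wrapper and widen the result.
	var oldStat stat_freebsd11_t
	if err = fstat(fd, &oldStat); err != nil {
		return err
	}
	st.convertFrom(&oldStat)
	return nil
}
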
diff --git a/src/syscall/zsyscall_freebsd_amd64.go b/src/syscall/zsyscall_freebsd_amd64.go
index 0312ca347c..4b519a7f7f 100644
--- a/src/syscall/zsyscall_freebsd_amd64.go
+++ b/src/syscall/zsyscall_freebsd_amd64.go
@@ -463,7 +463,7 @@ func Fpathconf(fd int, name int) (val int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
+func fstat(fd int, stat *stat_freebsd11_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -473,7 +473,32 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
+func fstat_freebsd12(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(_SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(_SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -483,6 +508,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) {
+ _, _, e1 := Syscall(_SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -503,7 +538,7 @@ func Ftruncate(fd int, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
@@ -520,6 +555,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getdtablesize() (size int) {
r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
size = int(r0)
@@ -721,7 +773,7 @@ func Listen(s int, backlog int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Lstat(path string, stat *Stat_t) (err error) {
+func lstat(path string, stat *stat_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -766,7 +818,7 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Mknod(path string, mode uint32, dev int) (err error) {
+func mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -781,6 +833,21 @@ func Mknod(path string, mode uint32, dev int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(_SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
@@ -1093,7 +1160,7 @@ func Setuid(uid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Stat(path string, stat *Stat_t) (err error) {
+func stat(path string, stat *stat_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -1108,7 +1175,7 @@ func Stat(path string, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Statfs(path string, stat *Statfs_t) (err error) {
+func statfs(path string, stat *statfs_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -1123,6 +1190,21 @@ func Statfs(path string, stat *Statfs_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func statfs_freebsd12(path string, stat *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(_SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/zsyscall_freebsd_arm.go b/src/syscall/zsyscall_freebsd_arm.go
index fcb0733774..e89707654b 100644
--- a/src/syscall/zsyscall_freebsd_arm.go
+++ b/src/syscall/zsyscall_freebsd_arm.go
@@ -463,7 +463,7 @@ func Fpathconf(fd int, name int) (val int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
+func fstat(fd int, stat *stat_freebsd11_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -473,7 +473,32 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
+func fstat_freebsd12(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(_SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(_SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
@@ -483,6 +508,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) {
+ _, _, e1 := Syscall(_SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -503,7 +538,7 @@ func Ftruncate(fd int, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
@@ -520,6 +555,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getdtablesize() (size int) {
r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
size = int(r0)
@@ -721,7 +773,7 @@ func Listen(s int, backlog int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Lstat(path string, stat *Stat_t) (err error) {
+func lstat(path string, stat *stat_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -766,7 +818,7 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Mknod(path string, mode uint32, dev int) (err error) {
+func mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -781,6 +833,21 @@ func Mknod(path string, mode uint32, dev int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(_SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
@@ -1093,7 +1160,7 @@ func Setuid(uid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Stat(path string, stat *Stat_t) (err error) {
+func stat(path string, stat *stat_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -1108,7 +1175,7 @@ func Stat(path string, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Statfs(path string, stat *Statfs_t) (err error) {
+func statfs(path string, stat *statfs_freebsd11_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -1123,6 +1190,21 @@ func Statfs(path string, stat *Statfs_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func statfs_freebsd12(path string, stat *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(_SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
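
The arm file gets the same treatment as 386 and amd64. For completeness, here is a sketch of the convertFrom shim referenced in the dispatch sketch above, which widens a freebsd11 stat result into the new Stat_t. The real method sits in syscall_freebsd.go and copies every field; this abridged version shows only the widening, with field types taken from the struct definitions in the ztypes hunks further down:

func (s *Stat_t) convertFrom(old *stat_freebsd11_t) {
	// Fields that grew (Dev, Ino, Nlink, Rdev, Gen) are widened
	// explicitly; the rest carry over unchanged.
	*s = Stat_t{
		Dev:      uint64(old.Dev),
		Ino:      uint64(old.Ino),
		Nlink:    uint64(old.Nlink),
		Mode:     old.Mode,
		Uid:      old.Uid,
		Gid:      old.Gid,
		Rdev:     uint64(old.Rdev),
		Atim:     old.Atim,
		Mtim:     old.Mtim,
		Ctim:     old.Ctim,
		Birthtim: old.Birthtim,
		Size:     old.Size,
		Blocks:   old.Blocks,
		Blksize:  old.Blksize,
		Flags:    old.Flags,
		Gen:      uint64(old.Gen),
	}
}
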
diff --git a/src/syscall/zsyscall_solaris_amd64.go b/src/syscall/zsyscall_solaris_amd64.go
index ecd37902e0..446ebfc503 100644
--- a/src/syscall/zsyscall_solaris_amd64.go
+++ b/src/syscall/zsyscall_solaris_amd64.go
@@ -1,4 +1,4 @@
-// mksyscall_solaris.pl -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go
+// mksyscall_libc.pl -solaris -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go
// Code generated by the command above; DO NOT EDIT.
// +build solaris,amd64
@@ -263,6 +263,8 @@ var (
libc_utimensat libcFunc
)
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getcwd(buf []byte) (n int, err error) {
var _p0 *byte
if len(buf) > 0 {
@@ -276,6 +278,8 @@ func Getcwd(buf []byte) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_getgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
n = int(r0)
@@ -285,6 +289,8 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func setgroups(ngid int, gid *_Gid_t) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_setgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
if e1 != 0 {
@@ -293,6 +299,8 @@ func setgroups(ngid int, gid *_Gid_t) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
val = int(r0)
@@ -302,6 +310,8 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_accept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
fd = int(r0)
@@ -311,6 +321,8 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
n = int(r0)
@@ -320,6 +332,8 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Access(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -333,6 +347,8 @@ func Access(path string, mode uint32) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Adjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0)
if e1 != 0 {
@@ -341,6 +357,8 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -354,6 +372,8 @@ func Chdir(path string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chmod(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -367,6 +387,8 @@ func Chmod(path string, mode uint32) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -380,6 +402,8 @@ func Chown(path string, uid int, gid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -393,6 +417,8 @@ func Chroot(path string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Close)), 1, uintptr(fd), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -401,6 +427,8 @@ func Close(fd int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup(fd int) (nfd int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Dup)), 1, uintptr(fd), 0, 0, 0, 0, 0)
nfd = int(r0)
@@ -410,6 +438,8 @@ func Dup(fd int) (nfd int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fchdir(fd int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -418,6 +448,8 @@ func Fchdir(fd int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0)
if e1 != 0 {
@@ -426,6 +458,8 @@ func Fchmod(fd int, mode uint32) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0)
if e1 != 0 {
@@ -434,6 +468,8 @@ func Fchown(fd int, uid int, gid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fpathconf(fd int, name int) (val int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0)
val = int(r0)
@@ -443,6 +479,8 @@ func Fpathconf(fd int, name int) (val int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
if e1 != 0 {
@@ -451,6 +489,8 @@ func Fstat(fd int, stat *Stat_t) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) {
var _p0 *byte
if len(buf) > 0 {
@@ -464,36 +504,48 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getgid() (gid int) {
r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getgid)), 0, 0, 0, 0, 0, 0, 0)
gid = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getpid() (pid int) {
r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getpid)), 0, 0, 0, 0, 0, 0, 0)
pid = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Geteuid() (euid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Geteuid)), 0, 0, 0, 0, 0, 0, 0)
euid = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getegid() (egid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Getegid)), 0, 0, 0, 0, 0, 0, 0)
egid = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getppid() (ppid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Getppid)), 0, 0, 0, 0, 0, 0, 0)
ppid = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getpriority(which int, who int) (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Getpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0)
n = int(r0)
@@ -503,6 +555,8 @@ func Getpriority(which int, who int) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
if e1 != 0 {
@@ -511,6 +565,8 @@ func Getrlimit(which int, lim *Rlimit) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Gettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -519,12 +575,16 @@ func Gettimeofday(tv *Timeval) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getuid() (uid int) {
r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getuid)), 0, 0, 0, 0, 0, 0, 0)
uid = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Kill(pid int, signum Signal) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Kill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0)
if e1 != 0 {
@@ -533,6 +593,8 @@ func Kill(pid int, signum Signal) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -546,6 +608,8 @@ func Lchown(path string, uid int, gid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Link(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -564,6 +628,8 @@ func Link(path string, link string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Listen(s int, backlog int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
if e1 != 0 {
@@ -572,6 +638,8 @@ func Listen(s int, backlog int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -585,6 +653,8 @@ func Lstat(path string, stat *Stat_t) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -598,6 +668,8 @@ func Mkdir(path string, mode uint32) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -611,6 +683,8 @@ func Mknod(path string, mode uint32, dev int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Nanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0)
if e1 != 0 {
@@ -619,6 +693,8 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -633,6 +709,8 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Pathconf(path string, name int) (val int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -647,6 +725,8 @@ func Pathconf(path string, name int) (val int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 *byte
if len(p) > 0 {
@@ -660,6 +740,8 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 *byte
if len(p) > 0 {
@@ -673,6 +755,8 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func read(fd int, p []byte) (n int, err error) {
var _p0 *byte
if len(p) > 0 {
@@ -686,6 +770,8 @@ func read(fd int, p []byte) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Readlink(path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -704,6 +790,8 @@ func Readlink(path string, buf []byte) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Rename(from string, to string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(from)
@@ -722,6 +810,8 @@ func Rename(from string, to string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Rmdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -735,6 +825,8 @@ func Rmdir(path string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0)
newoffset = int64(r0)
@@ -744,6 +836,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_sendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
written = int(r0)
@@ -753,6 +847,8 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setegid(egid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setegid)), 1, uintptr(egid), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -761,6 +857,8 @@ func Setegid(egid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seteuid(euid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Seteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -769,6 +867,8 @@ func Seteuid(euid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setgid(gid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setgid)), 1, uintptr(gid), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -777,6 +877,8 @@ func Setgid(gid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setpgid(pid int, pgid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0)
if e1 != 0 {
@@ -785,6 +887,8 @@ func Setpgid(pid int, pgid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setpriority(which int, who int, prio int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Setpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0)
if e1 != 0 {
@@ -793,6 +897,8 @@ func Setpriority(which int, who int, prio int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0)
if e1 != 0 {
@@ -801,6 +907,8 @@ func Setregid(rgid int, egid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0)
if e1 != 0 {
@@ -809,6 +917,8 @@ func Setreuid(ruid int, euid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
if e1 != 0 {
@@ -817,6 +927,8 @@ func Setrlimit(which int, lim *Rlimit) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setsid() (pid int, err error) {
r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setsid)), 0, 0, 0, 0, 0, 0, 0)
pid = int(r0)
@@ -826,6 +938,8 @@ func Setsid() (pid int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setuid(uid int) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setuid)), 1, uintptr(uid), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -834,6 +948,8 @@ func Setuid(uid int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Shutdown(s int, how int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_shutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0)
if e1 != 0 {
@@ -842,6 +958,8 @@ func Shutdown(s int, how int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Stat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -855,6 +973,8 @@ func Stat(path string, stat *Stat_t) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -873,6 +993,8 @@ func Symlink(path string, link string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Sync)), 0, 0, 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -881,6 +1003,8 @@ func Sync() (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -894,6 +1018,8 @@ func Truncate(path string, length int64) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fsync(fd int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0)
if e1 != 0 {
@@ -902,6 +1028,8 @@ func Fsync(fd int) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Ftruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0)
if e1 != 0 {
@@ -910,12 +1038,16 @@ func Ftruncate(fd int, length int64) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Umask(newmask int) (oldmask int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Umask)), 1, uintptr(newmask), 0, 0, 0, 0, 0)
oldmask = int(r0)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Unlink(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -929,6 +1061,8 @@ func Unlink(path string) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func utimes(path string, times *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -942,6 +1076,8 @@ func utimes(path string, times *[2]Timeval) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
if e1 != 0 {
@@ -950,6 +1086,8 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
if e1 != 0 {
@@ -958,6 +1096,8 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_mmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
ret = uintptr(r0)
@@ -967,6 +1107,8 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func munmap(addr uintptr, length uintptr) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_munmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0)
if e1 != 0 {
@@ -975,6 +1117,8 @@ func munmap(addr uintptr, length uintptr) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 *byte
if len(buf) > 0 {
@@ -987,6 +1131,8 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
fd = int(r0)
@@ -996,6 +1142,8 @@ func socket(domain int, typ int, proto int) (fd int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc___xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
@@ -1004,6 +1152,8 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func write(fd int, p []byte) (n int, err error) {
var _p0 *byte
if len(p) > 0 {
@@ -1017,6 +1167,8 @@ func write(fd int, p []byte) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
@@ -1025,6 +1177,8 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_getpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
if e1 != 0 {
@@ -1033,6 +1187,8 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_getsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
if e1 != 0 {
@@ -1041,6 +1197,8 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_setsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
@@ -1049,6 +1207,8 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr)
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 *byte
if len(p) > 0 {
@@ -1062,6 +1222,8 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
n = int(r0)
@@ -1071,6 +1233,8 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func getexecname() (path unsafe.Pointer, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_getexecname)), 0, 0, 0, 0, 0, 0, 0)
path = unsafe.Pointer(r0)
@@ -1080,6 +1244,8 @@ func getexecname() (path unsafe.Pointer, err error) {
return
}
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/ztypes_aix_ppc64.go b/src/syscall/ztypes_aix_ppc64.go
new file mode 100644
index 0000000000..4fbbe23134
--- /dev/null
+++ b/src/syscall/ztypes_aix_ppc64.go
@@ -0,0 +1,272 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_aix.go | go run mkpost.go
+
+package syscall
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ PathMax = 0x3ff
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int32
+ Pad_cgo_0 [4]byte
+}
+
+type Timeval32 struct {
+ Sec int32
+ Usec int32
+}
+
+type Timezone struct {
+ Minuteswest int32
+ Dsttime int32
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type Pid_t int32
+
+type _Gid_t uint32
+
+type Flock_t struct {
+ Type int16
+ Whence int16
+ Sysid uint32
+ Pid int32
+ Vfs int32
+ Start int64
+ Len int64
+}
+
+type Stat_t struct {
+ Dev uint64
+ Ino uint64
+ Mode uint32
+ Nlink int16
+ Flag uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint64
+ Ssize int32
+ Pad_cgo_0 [4]byte
+ Atim StTimespec_t
+ Mtim StTimespec_t
+ Ctim StTimespec_t
+ Blksize int64
+ Blocks int64
+ Vfstype int32
+ Vfs uint32
+ Type uint32
+ Gen uint32
+ Reserved [9]uint32
+ Padto_ll uint32
+ Size int64
+}
+
+type Statfs_t struct {
+ Version int32
+ Type int32
+ Bsize uint64
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid Fsid64_t
+ Vfstype int32
+ Pad_cgo_0 [4]byte
+ Fsize uint64
+ Vfsnumber int32
+ Vfsoff int32
+ Vfslen int32
+ Vfsvers int32
+ Fname [32]uint8
+ Fpack [32]uint8
+ Name_max int32
+ Pad_cgo_1 [4]byte
+}
+
+type Fsid64_t struct {
+ Val [2]uint64
+}
+
+type StTimespec_t struct {
+ Sec int64
+ Nsec int32
+ Pad_cgo_0 [4]byte
+}
+
+type Dirent struct {
+ Offset uint64
+ Ino uint64
+ Reclen uint16
+ Namlen uint16
+ Name [256]uint8
+ Pad_cgo_0 [4]byte
+}
+
+type RawSockaddrInet4 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Len uint8
+ Family uint8
+ Path [1023]uint8
+}
+
+type RawSockaddr struct {
+ Len uint8
+ Family uint8
+ Data [14]uint8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [1012]uint8
+}
+
+type _Socklen uint32
+
+type Cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+type ICMPv6Filter struct {
+ Filt [8]uint32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint64
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Pad_cgo_0 [4]byte
+ Iov *Iovec
+ Iovlen int32
+ Pad_cgo_1 [4]byte
+ Control *byte
+ Controllen uint32
+ Flags int32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x404
+ SizeofSockaddrUnix = 0x401
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofICMPv6Filter = 0x20
+)
+
+const (
+ PTRACE_TRACEME = 0x0
+ PTRACE_CONT = 0x7
+ PTRACE_KILL = 0x8
+)
+
+const (
+ SizeofIfMsghdr = 0x10
+)
+
+type IfMsgHdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Addrs int32
+ Flags int32
+ Index uint16
+ Addrlen uint8
+ Pad_cgo_0 [1]byte
+}
+
+type Utsname struct {
+ Sysname [32]uint8
+ Nodename [32]uint8
+ Release [32]uint8
+ Version [32]uint8
+ Machine [32]uint8
+}
+
+const (
+ _AT_FDCWD = -0x2
+ _AT_REMOVEDIR = 0x1
+ _AT_SYMLINK_NOFOLLOW = 0x1
+)
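
ztypes_aix_ppc64.go is not written by hand: as its header says, it is produced by running cgo -godefs over a types_aix.go input and post-processing the output with mkpost.go. The input declares Go types as aliases of C types and lets cgo compute sizes, offsets, and alignment padding — which is where fields like Pad_cgo_0 come from. A sketch of what such an input looks like; the file itself is not part of this diff, and the includes and type list here are illustrative only:

// +build ignore

package syscall

/*
#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>

enum {
	sizeofPtr = sizeof(void*),
};
*/
import "C"

// cgo -godefs replaces each C alias below with a concrete Go struct.
const sizeofPtr = C.sizeofPtr

type Timespec C.struct_timespec

type Timeval C.struct_timeval

type Stat_t C.struct_stat
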
diff --git a/src/syscall/ztypes_freebsd_386.go b/src/syscall/ztypes_freebsd_386.go
index 242a73d1de..3ca31f2872 100644
--- a/src/syscall/ztypes_freebsd_386.go
+++ b/src/syscall/ztypes_freebsd_386.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_freebsd.go
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_freebsd.go | go run mkpost.go
// +build 386,freebsd
@@ -75,28 +75,84 @@ const (
S_IRWXO = 0x7
)
+const (
+ _statfsVersion = 0x20140518
+ _dirblksiz = 0x400
+)
+
type Stat_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtimespec Timespec
- Pad_cgo_0 [8]byte
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ Padding0 int16
+ Uid uint32
+ Gid uint32
+ Padding1 int32
+ Rdev uint64
+ Atim_ext int32
+ Atim Timespec
+ Mtim_ext int32
+ Mtim Timespec
+ Ctim_ext int32
+ Ctim Timespec
+ Btim_ext int32
+ Birthtim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
+}
+
+type stat_freebsd11_t struct {
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Birthtim Timespec
+ Pad_cgo_0 [8]byte
}
type Statfs_t struct {
+ Version uint32
+ Type uint32
+ Flags uint64
+ Bsize uint64
+ Iosize uint64
+ Blocks uint64
+ Bfree uint64
+ Bavail int64
+ Files uint64
+ Ffree int64
+ Syncwrites uint64
+ Asyncwrites uint64
+ Syncreads uint64
+ Asyncreads uint64
+ Spare [10]uint64
+ Namemax uint32
+ Owner uint32
+ Fsid Fsid
+ Charspare [80]int8
+ Fstypename [16]int8
+ Mntfromname [1024]int8
+ Mntonname [1024]int8
+}
+
+type statfs_freebsd11_t struct {
Version uint32
Type uint32
Flags uint64
@@ -131,6 +187,17 @@ type Flock_t struct {
}
type Dirent struct {
+ Fileno uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Pad0 uint8
+ Namlen uint16
+ Pad1 uint16
+ Name [256]int8
+}
+
+type dirent_freebsd11 struct {
Fileno uint32
Reclen uint16
Type uint8
@@ -490,7 +557,9 @@ type BpfZbufHeader struct {
}
const (
- _AT_FDCWD = -0x64
+ _AT_FDCWD = -0x64
+ _AT_SYMLINK_FOLLOW = 0x400
+ _AT_SYMLINK_NOFOLLOW = 0x200
)
type Termios struct {
diff --git a/src/syscall/ztypes_freebsd_amd64.go b/src/syscall/ztypes_freebsd_amd64.go
index 8b34cde2ee..797a3bab08 100644
--- a/src/syscall/ztypes_freebsd_amd64.go
+++ b/src/syscall/ztypes_freebsd_amd64.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_freebsd.go
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs types_freebsd.go | go run mkpost.go
// +build amd64,freebsd
@@ -75,27 +75,79 @@ const (
S_IRWXO = 0x7
)
+const (
+ _statfsVersion = 0x20140518
+ _dirblksiz = 0x400
+)
+
type Stat_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtimespec Timespec
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ Padding0 int16
+ Uid uint32
+ Gid uint32
+ Padding1 int32
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Birthtim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
+}
+
+type stat_freebsd11_t struct {
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Birthtim Timespec
}
type Statfs_t struct {
+ Version uint32
+ Type uint32
+ Flags uint64
+ Bsize uint64
+ Iosize uint64
+ Blocks uint64
+ Bfree uint64
+ Bavail int64
+ Files uint64
+ Ffree int64
+ Syncwrites uint64
+ Asyncwrites uint64
+ Syncreads uint64
+ Asyncreads uint64
+ Spare [10]uint64
+ Namemax uint32
+ Owner uint32
+ Fsid Fsid
+ Charspare [80]int8
+ Fstypename [16]int8
+ Mntfromname [1024]int8
+ Mntonname [1024]int8
+}
+
+type statfs_freebsd11_t struct {
Version uint32
Type uint32
Flags uint64
@@ -131,6 +183,17 @@ type Flock_t struct {
}
type Dirent struct {
+ Fileno uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Pad0 uint8
+ Namlen uint16
+ Pad1 uint16
+ Name [256]int8
+}
+
+type dirent_freebsd11 struct {
Fileno uint32
Reclen uint16
Type uint8
@@ -493,7 +556,9 @@ type BpfZbufHeader struct {
}
const (
- _AT_FDCWD = -0x64
+ _AT_FDCWD = -0x64
+ _AT_SYMLINK_FOLLOW = 0x400
+ _AT_SYMLINK_NOFOLLOW = 0x200
)
type Termios struct {
diff --git a/src/syscall/ztypes_freebsd_arm.go b/src/syscall/ztypes_freebsd_arm.go
index 4fd6bd509c..9be8752e18 100644
--- a/src/syscall/ztypes_freebsd_arm.go
+++ b/src/syscall/ztypes_freebsd_arm.go
@@ -1,4 +1,4 @@
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -fsigned-char types_freebsd.go
// +build arm,freebsd
@@ -77,27 +77,79 @@ const (
S_IRWXO = 0x7
)
+const (
+ _statfsVersion = 0x20140518
+ _dirblksiz = 0x400
+)
+
type Stat_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtimespec Timespec
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ Padding0 int16
+ Uid uint32
+ Gid uint32
+ Padding1 int32
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Birthtim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
+}
+
+type stat_freebsd11_t struct {
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Birthtim Timespec
}
type Statfs_t struct {
+ Version uint32
+ Type uint32
+ Flags uint64
+ Bsize uint64
+ Iosize uint64
+ Blocks uint64
+ Bfree uint64
+ Bavail int64
+ Files uint64
+ Ffree int64
+ Syncwrites uint64
+ Asyncwrites uint64
+ Syncreads uint64
+ Asyncreads uint64
+ Spare [10]uint64
+ Namemax uint32
+ Owner uint32
+ Fsid Fsid
+ Charspare [80]int8
+ Fstypename [16]int8
+ Mntfromname [1024]int8
+ Mntonname [1024]int8
+}
+
+type statfs_freebsd11_t struct {
Version uint32
Type uint32
Flags uint64
@@ -133,6 +185,17 @@ type Flock_t struct {
}
type Dirent struct {
+ Fileno uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Pad0 uint8
+ Namlen uint16
+ Pad1 uint16
+ Name [256]int8
+}
+
+type dirent_freebsd11 struct {
Fileno uint32
Reclen uint16
Type uint8
@@ -493,7 +556,9 @@ type BpfZbufHeader struct {
}
const (
- _AT_FDCWD = -0x64
+ _AT_FDCWD = -0x64
+ _AT_SYMLINK_FOLLOW = 0x400
+ _AT_SYMLINK_NOFOLLOW = 0x200
)
type Termios struct {
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index 9c7b1be79e..90f86dc373 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -10,15 +10,50 @@ import (
"internal/race"
"os"
"runtime"
+ "strconv"
+ "strings"
"sync"
"sync/atomic"
"time"
)
var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
-var benchTime = flag.Duration("test.benchtime", 1*time.Second, "run each benchmark for duration `d`")
+var benchTime = benchTimeFlag{d: 1 * time.Second}
var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
+func init() {
+ flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
+}
+
+type benchTimeFlag struct {
+ d time.Duration
+ n int
+}
+
+func (f *benchTimeFlag) String() string {
+ if f.n > 0 {
+ return fmt.Sprintf("%dx", f.n)
+ }
+ return time.Duration(f.d).String()
+}
+
+func (f *benchTimeFlag) Set(s string) error {
+ if strings.HasSuffix(s, "x") {
+ n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
+ if err != nil || n <= 0 {
+ return fmt.Errorf("invalid count")
+ }
+ *f = benchTimeFlag{n: int(n)}
+ return nil
+ }
+ d, err := time.ParseDuration(s)
+ if err != nil || d <= 0 {
+ return fmt.Errorf("invalid duration")
+ }
+ *f = benchTimeFlag{d: d}
+ return nil
+}
+
// Global lock to ensure only one benchmark runs at a time.
var benchmarkLock sync.Mutex
@@ -53,7 +88,7 @@ type B struct {
previousN int // number of iterations in the previous run
previousDuration time.Duration // total duration of the previous run
benchFunc func(b *B)
- benchTime time.Duration
+ benchTime benchTimeFlag
bytes int64
missingBytes bool // one of the subbenchmarks does not have bytes set.
timerOn bool
@@ -273,21 +308,25 @@ func (b *B) launch() {
}()
// Run the benchmark for at least the specified amount of time.
- d := b.benchTime
- for n := 1; !b.failed && b.duration < d && n < 1e9; {
- last := n
- // Predict required iterations.
- n = int(d.Nanoseconds())
- if nsop := b.nsPerOp(); nsop != 0 {
- n /= int(nsop)
+ if b.benchTime.n > 0 {
+ b.runN(b.benchTime.n)
+ } else {
+ d := b.benchTime.d
+ for n := 1; !b.failed && b.duration < d && n < 1e9; {
+ last := n
+ // Predict required iterations.
+ n = int(d.Nanoseconds())
+ if nsop := b.nsPerOp(); nsop != 0 {
+ n /= int(nsop)
+ }
+ // Run more iterations than we think we'll need (1.2x).
+ // Don't grow too fast in case we had timing errors previously.
+ // Be sure to run at least one more than last time.
+ n = max(min(n+n/5, 100*last), last+1)
+ // Round up to something easy to read.
+ n = roundUp(n)
+ b.runN(n)
}
- // Run more iterations than we think we'll need (1.2x).
- // Don't grow too fast in case we had timing errors previously.
- // Be sure to run at least one more than last time.
- n = max(min(n+n/5, 100*last), last+1)
- // Round up to something easy to read.
- n = roundUp(n)
- b.runN(n)
}
b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
}
@@ -416,7 +455,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
b.Run(Benchmark.Name, Benchmark.F)
}
},
- benchTime: *benchTime,
+ benchTime: benchTime,
context: ctx,
}
main.runN(1)
@@ -653,7 +692,7 @@ func Benchmark(f func(b *B)) BenchmarkResult {
w: discard{},
},
benchFunc: f,
- benchTime: *benchTime,
+ benchTime: benchTime,
}
if b.run1() {
b.run()
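
The hunks above replace the plain duration flag with a flag.Value that also accepts an iteration count. A minimal usage sketch (the benchmark name is illustrative, not from this patch):

package demo

import "testing"

// With the benchTimeFlag change, both forms of -benchtime work:
//
//	go test -bench=BenchmarkNoop -benchtime=2s    // time-based: b.N grows until ~2s elapse
//	go test -bench=BenchmarkNoop -benchtime=100x  // count-based: b.N is exactly 100
func BenchmarkNoop(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// work under measurement
	}
}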
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
index 29803c06e2..8c989714a1 100644
--- a/src/testing/sub_test.go
+++ b/src/testing/sub_test.go
@@ -17,7 +17,7 @@ import (
func init() {
// Make benchmark tests run 10* faster.
- *benchTime = 100 * time.Millisecond
+ benchTime.d = 100 * time.Millisecond
}
func TestTestContext(t *T) {
@@ -411,6 +411,29 @@ func TestTRun(t *T) {
ch <- true
<-ch
},
+ }, {
+ desc: "log in finished sub test logs to parent",
+ ok: false,
+ output: `
+ --- FAIL: log in finished sub test logs to parent (N.NNs)
+ sub_test.go:NNN: message2
+ sub_test.go:NNN: message1
+ sub_test.go:NNN: error`,
+ maxPar: 1,
+ f: func(t *T) {
+ ch := make(chan bool)
+ t.Run("sub", func(t2 *T) {
+ go func() {
+ <-ch
+ t2.Log("message1")
+ ch <- true
+ }()
+ })
+ t.Log("message2")
+ ch <- true
+ <-ch
+ t.Errorf("error")
+ },
}}
for _, tc := range testCases {
ctx := newTestContext(tc.maxPar, newMatcher(regexp.MatchString, "", ""))
@@ -570,7 +593,7 @@ func TestBRun(t *T) {
chatty: tc.chatty,
},
benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
- benchTime: time.Microsecond,
+ benchTime: benchTimeFlag{d: 1 * time.Microsecond},
}
root.runN(1)
if ok != !tc.failed {
diff --git a/src/testing/testing.go b/src/testing/testing.go
index 179987b699..0bc222c0bb 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -403,8 +403,8 @@ func (c *common) frameSkip(skip int) runtime.Frame {
// decorate prefixes the string with the file and line of the call site
// and inserts the final newline if needed and indentation spaces for formatting.
// This function must be called with c.mu held.
-func (c *common) decorate(s string) string {
- frame := c.frameSkip(3) // decorate + log + public function.
+func (c *common) decorate(s string, skip int) string {
+ frame := c.frameSkip(skip)
file := frame.File
line := frame.Line
if file != "" {
@@ -599,9 +599,25 @@ func (c *common) FailNow() {
// log generates the output. It's always at the same stack depth.
func (c *common) log(s string) {
+ c.logDepth(s, 3) // logDepth + log + public function
+}
+
+// logDepth generates the output at an arbitrary stack depth.
+func (c *common) logDepth(s string, depth int) {
c.mu.Lock()
defer c.mu.Unlock()
- c.output = append(c.output, c.decorate(s)...)
+ // If this test has already finished, try to log this message with our parent,
+ // with this test's name tagged so we know where it came from.
+ // If we don't have a parent, panic.
+ if c.done {
+ if c.parent != nil {
+ c.parent.logDepth(s, depth+1)
+ } else {
+ panic("Log in goroutine after " + c.name + " has completed")
+ }
+ } else {
+ c.output = append(c.output, c.decorate(s, depth+1)...)
+ }
}
// Log formats its arguments using default formatting, analogous to Println,
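
A hedged sketch of the scenario the logDepth change addresses (test names are illustrative): a goroutine spawned by a subtest logs after that subtest has completed. With the change, the message is re-attributed to the still-running parent instead of being dropped; only when no live parent exists does it panic.

package demo

import (
	"testing"
	"time"
)

func TestLateLog(t *testing.T) {
	t.Run("sub", func(sub *testing.T) {
		go func() {
			time.Sleep(10 * time.Millisecond)
			// "sub" is done by now; this log is forwarded to TestLateLog
			// via the logDepth path rather than lost.
			sub.Log("late message")
		}()
	})
	time.Sleep(50 * time.Millisecond) // keep the parent alive for the late log
}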
diff --git a/src/text/template/doc.go b/src/text/template/doc.go
index 4b243067b0..0179dec5c3 100644
--- a/src/text/template/doc.go
+++ b/src/text/template/doc.go
@@ -142,7 +142,9 @@ An argument is a simple value, denoted by one of the following.
- A boolean, string, character, integer, floating-point, imaginary
or complex constant in Go syntax. These behave like Go's untyped
- constants.
+ constants. Note that, as in Go, whether a large integer constant
+ overflows when assigned or passed to a function can depend on whether
+ the host machine's ints are 32 or 64 bits.
- The keyword nil, representing an untyped Go nil.
- The character '.' (period):
.
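
A minimal sketch of the caveat being documented: 8589934592 (1<<33) fits a 64-bit int, so this executes cleanly on amd64, while on a 32-bit host Execute reports that the constant overflows int.

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// 8589934592 == 1<<33: representable in a 64-bit int only.
	t := template.Must(template.New("t").Parse(`{{printf "%d" 8589934592}}`))
	if err := t.Execute(os.Stdout, nil); err != nil {
		// On a 32-bit host, Execute reports that the constant overflows int.
		fmt.Println("execute:", err)
	}
}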
diff --git a/src/text/template/exec.go b/src/text/template/exec.go
index 1d04c2982f..c6ce657cf6 100644
--- a/src/text/template/exec.go
+++ b/src/text/template/exec.go
@@ -7,10 +7,10 @@ package template
import (
"bytes"
"fmt"
+ "internal/fmtsort"
"io"
"reflect"
"runtime"
- "sort"
"strings"
"text/template/parse"
)
@@ -362,8 +362,9 @@ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
if val.Len() == 0 {
break
}
- for _, key := range sortKeys(val.MapKeys()) {
- oneIteration(key, val.MapIndex(key))
+ om := fmtsort.Sort(val)
+ for i, key := range om.Key {
+ oneIteration(key, om.Value[i])
}
return
case reflect.Chan:
@@ -692,13 +693,13 @@ func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, a
}
argv[i] = s.validateType(final, t)
}
- result := fun.Call(argv)
- // If we have an error that is not nil, stop execution and return that error to the caller.
- if len(result) == 2 && !result[1].IsNil() {
+ v, err := safeCall(fun, argv)
+ // If we have an error that is not nil, stop execution and return that
+ // error to the caller.
+ if err != nil {
s.at(node)
- s.errorf("error calling %s: %s", name, result[1].Interface().(error))
+ s.errorf("error calling %s: %v", name, err)
}
- v := result[0]
if v.Type() == reflectValueType {
v = v.Interface().(reflect.Value)
}
@@ -958,29 +959,3 @@ func printableValue(v reflect.Value) (interface{}, bool) {
}
return v.Interface(), true
}
-
-// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
-func sortKeys(v []reflect.Value) []reflect.Value {
- if len(v) <= 1 {
- return v
- }
- switch v[0].Kind() {
- case reflect.Float32, reflect.Float64:
- sort.Slice(v, func(i, j int) bool {
- return v[i].Float() < v[j].Float()
- })
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- sort.Slice(v, func(i, j int) bool {
- return v[i].Int() < v[j].Int()
- })
- case reflect.String:
- sort.Slice(v, func(i, j int) bool {
- return v[i].String() < v[j].String()
- })
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- sort.Slice(v, func(i, j int) bool {
- return v[i].Uint() < v[j].Uint()
- })
- }
- return v
-}
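
The walkRange change swaps the local sortKeys helper for internal/fmtsort, so templates now order map keys the same way fmt does when printing maps, including for key types the old helper ignored. For common key types the observable behavior is unchanged; a quick sketch:

package main

import (
	"os"
	"text/template"
)

func main() {
	m := map[int]string{3: "c", 1: "a", 2: "b"}
	t := template.Must(template.New("t").Parse(
		`{{range $k, $v := .}}{{$k}}={{$v}} {{end}}`))
	// Keys are visited in sorted order: prints "1=a 2=b 3=c ".
	if err := t.Execute(os.Stdout, m); err != nil {
		panic(err)
	}
}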
diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
index 648ad8ff03..bfd6d38bf4 100644
--- a/src/text/template/exec_test.go
+++ b/src/text/template/exec_test.go
@@ -74,6 +74,7 @@ type T struct {
VariadicFuncInt func(int, ...string) string
NilOKFunc func(*int) bool
ErrFunc func() (string, error)
+ PanicFunc func() string
// Template to test evaluation of templates.
Tmpl *Template
// Unexported field; cannot be accessed by template.
@@ -156,6 +157,7 @@ var tVal = &T{
VariadicFuncInt: func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
NilOKFunc: func(s *int) bool { return s == nil },
ErrFunc: func() (string, error) { return "bla", nil },
+ PanicFunc: func() string { panic("test panic") },
Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X
}
@@ -1451,3 +1453,60 @@ func TestInterfaceValues(t *testing.T) {
}
}
}
+
+// Check that panics during calls are recovered and returned as errors.
+func TestExecutePanicDuringCall(t *testing.T) {
+ funcs := map[string]interface{}{
+ "doPanic": func() string {
+ panic("custom panic string")
+ },
+ }
+ tests := []struct {
+ name string
+ input string
+ data interface{}
+ wantErr string
+ }{
+ {
+ "direct func call panics",
+ "{{doPanic}}", (*T)(nil),
+ `template: t:1:2: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+ },
+ {
+ "indirect func call panics",
+ "{{call doPanic}}", (*T)(nil),
+ `template: t:1:7: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+ },
+ {
+ "direct method call panics",
+ "{{.GetU}}", (*T)(nil),
+ `template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+ },
+ {
+ "indirect method call panics",
+ "{{call .GetU}}", (*T)(nil),
+ `template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+ },
+ {
+ "func field call panics",
+ "{{call .PanicFunc}}", tVal,
+ `template: t:1:2: executing "t" at <call .PanicFunc>: error calling call: test panic`,
+ },
+ }
+ for _, tc := range tests {
+ b := new(bytes.Buffer)
+ tmpl, err := New("t").Funcs(funcs).Parse(tc.input)
+ if err != nil {
+ t.Fatalf("parse error: %s", err)
+ }
+ err = tmpl.Execute(b, tc.data)
+ if err == nil {
+ t.Errorf("%s: expected error; got none", tc.name)
+ } else if !strings.Contains(err.Error(), tc.wantErr) {
+ if *debug {
+ fmt.Printf("%s: test execute error: %s\n", tc.name, err)
+ }
+ t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err)
+ }
+ }
+}
diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
index 31fe77a327..72d3f66691 100644
--- a/src/text/template/funcs.go
+++ b/src/text/template/funcs.go
@@ -275,11 +275,26 @@ func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
return reflect.Value{}, fmt.Errorf("arg %d: %s", i, err)
}
}
- result := v.Call(argv)
- if len(result) == 2 && !result[1].IsNil() {
- return result[0], result[1].Interface().(error)
+ return safeCall(v, argv)
+}
+
+// safeCall runs fun.Call(args), and returns the resulting value and error, if
+// any. If the call panics, the panic value is returned as an error.
+func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(error); ok {
+ err = e
+ } else {
+ err = fmt.Errorf("%v", r)
+ }
+ }
+ }()
+ ret := fun.Call(args)
+ if len(ret) == 2 && !ret[1].IsNil() {
+ return ret[0], ret[1].Interface().(error)
}
- return result[0], nil
+ return ret[0], nil
}
// Boolean logic.
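
A minimal demonstration of what safeCall changes for users (the function name is illustrative): a panic inside a template function now comes back as an error from Execute rather than unwinding into the caller.

package main

import (
	"fmt"
	"io/ioutil"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"boom": func() string { panic("kaboom") }, // illustrative function
	}
	t := template.Must(template.New("t").Funcs(funcs).Parse(`{{boom}}`))
	// With safeCall, the panic is recovered and surfaces here as an error,
	// e.g.: template: t:1:2: executing "t" at <boom>: error calling boom: kaboom
	fmt.Println(t.Execute(ioutil.Discard, nil))
}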
diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
index cb9b44e9da..efdad3297c 100644
--- a/src/text/template/parse/parse.go
+++ b/src/text/template/parse/parse.go
@@ -148,9 +148,6 @@ func (t *Tree) ErrorContext(n Node) (location, context string) {
}
lineNum := 1 + strings.Count(text, "\n")
context = n.String()
- if len(context) > 20 {
- context = fmt.Sprintf("%.20s...", context)
- }
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
}
diff --git a/src/time/sys_unix.go b/src/time/sys_unix.go
index e064e0046c..f4756b18a6 100644
--- a/src/time/sys_unix.go
+++ b/src/time/sys_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package time
diff --git a/src/time/zoneinfo_unix.go b/src/time/zoneinfo_unix.go
index 682e24b566..fca8e5497b 100644
--- a/src/time/zoneinfo_unix.go
+++ b/src/time/zoneinfo_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin,386 darwin,amd64 dragonfly freebsd js,wasm linux,!android nacl netbsd openbsd solaris
+// +build aix darwin,386 darwin,amd64 dragonfly freebsd js,wasm linux,!android nacl netbsd openbsd solaris
// Parse "zoneinfo" time zone file.
// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
diff --git a/src/unsafe/unsafe.go b/src/unsafe/unsafe.go
index 00961cffa3..e16c4aeacb 100644
--- a/src/unsafe/unsafe.go
+++ b/src/unsafe/unsafe.go
@@ -178,11 +178,13 @@ type Pointer *ArbitraryType
// The size does not include any memory possibly referenced by x.
// For instance, if x is a slice, Sizeof returns the size of the slice
// descriptor, not the size of the memory referenced by the slice.
+// The return value of Sizeof is a Go constant.
func Sizeof(x ArbitraryType) uintptr
// Offsetof returns the offset within the struct of the field represented by x,
// which must be of the form structValue.field. In other words, it returns the
// number of bytes between the start of the struct and the start of the field.
+// The return value of Offsetof is a Go constant.
func Offsetof(x ArbitraryType) uintptr
// Alignof takes an expression x of any type and returns the required alignment
@@ -193,4 +195,5 @@ func Offsetof(x ArbitraryType) uintptr
// within that struct, then Alignof(s.f) will return the required alignment
// of a field of that type within a struct. This case is the same as the
// value returned by reflect.TypeOf(s.f).FieldAlign().
+// The return value of Alignof is a Go constant.
func Alignof(x ArbitraryType) uintptr
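
These three doc lines record that the results are usable wherever a Go constant is required; a brief sketch with an illustrative struct:

package main

import (
	"fmt"
	"unsafe"
)

type header struct {
	tag uint32
	len uint16
}

// All three results are Go constants, so they may appear in const
// declarations and as array lengths.
const (
	sizeofHeader = unsafe.Sizeof(header{})
	offsetOfLen  = unsafe.Offsetof(header{}.len)
	alignOfTag   = unsafe.Alignof(header{}.tag)
)

func main() {
	var buf [sizeofHeader]byte // constant array length
	fmt.Println(len(buf), offsetOfLen, alignOfTag)
}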
diff --git a/src/vendor/golang_org/x/net/route/interface.go b/src/vendor/golang_org/x/net/route/interface.go
index 854906d9c4..05ef2a9ce3 100644
--- a/src/vendor/golang_org/x/net/route/interface.go
+++ b/src/vendor/golang_org/x/net/route/interface.go
@@ -37,7 +37,7 @@ func (m *InterfaceAddrMessage) Sys() []Sys { return nil }
// address message.
type InterfaceMulticastAddrMessage struct {
Version int // message version
- Type int // messsage type
+ Type int // message type
Flags int // interface flags
Index int // interface index
Addrs []Addr // addresses
diff --git a/src/vendor/golang_org/x/net/route/message_freebsd_test.go b/src/vendor/golang_org/x/net/route/message_freebsd_test.go
index db4b56752c..c6d8a5f54c 100644
--- a/src/vendor/golang_org/x/net/route/message_freebsd_test.go
+++ b/src/vendor/golang_org/x/net/route/message_freebsd_test.go
@@ -4,10 +4,7 @@
package route
-import (
- "testing"
- "unsafe"
-)
+import "testing"
func TestFetchAndParseRIBOnFreeBSD(t *testing.T) {
for _, typ := range []RIBType{sysNET_RT_IFMALIST} {
@@ -40,8 +37,7 @@ func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) {
if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil {
t.Skip("NET_RT_IFLISTL not supported")
}
- var p uintptr
- if kernelAlign != int(unsafe.Sizeof(p)) {
+ if compatFreeBSD32 {
t.Skip("NET_RT_IFLIST vs. NET_RT_IFLISTL doesn't work for 386 emulation on amd64")
}
diff --git a/src/vendor/golang_org/x/net/route/sys_freebsd.go b/src/vendor/golang_org/x/net/route/sys_freebsd.go
index 89ba1c4e26..fe91be1249 100644
--- a/src/vendor/golang_org/x/net/route/sys_freebsd.go
+++ b/src/vendor/golang_org/x/net/route/sys_freebsd.go
@@ -54,10 +54,12 @@ func (m *InterfaceMessage) Sys() []Sys {
}
}
+var compatFreeBSD32 bool // 386 emulation on amd64
+
func probeRoutingStack() (int, map[int]*wireFormat) {
var p uintptr
wordSize := int(unsafe.Sizeof(p))
- align := int(unsafe.Sizeof(p))
+ align := wordSize
// In the case of kern.supported_archs="amd64 i386", we need
// to know the underlying kernel's architecture because the
// alignment for routing facilities are set at the build time
@@ -83,8 +85,11 @@ func probeRoutingStack() (int, map[int]*wireFormat) {
break
}
}
+ if align != wordSize {
+ compatFreeBSD32 = true // 386 emulation on amd64
+ }
var rtm, ifm, ifam, ifmam, ifanm *wireFormat
- if align != wordSize { // 386 emulation on amd64
+ if compatFreeBSD32 {
rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu}
ifm = &wireFormat{extOff: 16}
ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu}
@@ -100,35 +105,38 @@ func probeRoutingStack() (int, map[int]*wireFormat) {
rel, _ := syscall.SysctlUint32("kern.osreldate")
switch {
case rel < 800000:
- if align != wordSize { // 386 emulation on amd64
+ if compatFreeBSD32 {
ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu
} else {
ifm.bodyOff = sizeofIfMsghdrFreeBSD7
}
case 800000 <= rel && rel < 900000:
- if align != wordSize { // 386 emulation on amd64
+ if compatFreeBSD32 {
ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu
} else {
ifm.bodyOff = sizeofIfMsghdrFreeBSD8
}
case 900000 <= rel && rel < 1000000:
- if align != wordSize { // 386 emulation on amd64
+ if compatFreeBSD32 {
ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu
} else {
ifm.bodyOff = sizeofIfMsghdrFreeBSD9
}
case 1000000 <= rel && rel < 1100000:
- if align != wordSize { // 386 emulation on amd64
+ if compatFreeBSD32 {
ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu
} else {
ifm.bodyOff = sizeofIfMsghdrFreeBSD10
}
default:
- if align != wordSize { // 386 emulation on amd64
+ if compatFreeBSD32 {
ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu
} else {
ifm.bodyOff = sizeofIfMsghdrFreeBSD11
}
+ if rel >= 1102000 { // see https://github.com/freebsd/freebsd/commit/027c7f4d66ff8d8c4a46c3665a5ee7d6d8462034#diff-ad4e5b7f1449ea3fc87bc97280de145b
+ align = wordSize
+ }
}
rtm.parse = rtm.parseRouteMessage
ifm.parse = ifm.parseInterfaceMessage
diff --git a/src/vendor/golang_org/x/net/route/syscall.go b/src/vendor/golang_org/x/net/route/syscall.go
index c211188b10..5f69ea63d9 100644
--- a/src/vendor/golang_org/x/net/route/syscall.go
+++ b/src/vendor/golang_org/x/net/route/syscall.go
@@ -20,7 +20,7 @@ func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
} else {
p = unsafe.Pointer(&zero)
}
- _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen)
if errno != 0 {
return error(errno)
}
diff --git a/test/closure3.dir/main.go b/test/closure3.dir/main.go
index e382ad980b..59c36e3218 100644
--- a/test/closure3.dir/main.go
+++ b/test/closure3.dir/main.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Check correctness of various closure corner cases that
+// Check correctness of various closure corner cases
// that are expected to be inlined
package main
diff --git a/test/closure3.go b/test/closure3.go
index 263d8fcb47..37b548d6dc 100644
--- a/test/closure3.go
+++ b/test/closure3.go
@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Check correctness of various closure corner cases that
+// Check correctness of various closure corner cases
// that are expected to be inlined
package ignored
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index 8e2a210948..c65fb0144a 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -14,7 +14,8 @@ package codegen
// Subtraction //
// ----------------- //
-func SubMem(arr []int, b int) int {
+var ef int
+func SubMem(arr []int, b, c, d int) int {
// 386:`SUBL\s[A-Z]+,\s8\([A-Z]+\)`
// amd64:`SUBQ\s[A-Z]+,\s16\([A-Z]+\)`
arr[2] -= b
@@ -25,6 +26,14 @@ func SubMem(arr []int, b int) int {
arr[4]--
// 386:`ADDL\s[$]-20,\s20\([A-Z]+\)`
arr[5] -= 20
+ // 386:`SUBL\s\([A-Z]+\)\([A-Z]+\*4\),\s[A-Z]+`
+ ef -= arr[b]
+ // 386:`SUBL\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+\*4\)`
+ arr[c] -= b
+ // 386:`ADDL\s[$]-15,\s\([A-Z]+\)\([A-Z]+\*4\)`
+ arr[d] -= 15
+ // 386:`DECL\s\([A-Z]+\)\([A-Z]+\*4\)`
+ arr[b]--
// 386:"SUBL\t4"
// amd64:"SUBQ\t8"
return arr[0] - arr[1]
@@ -39,12 +48,16 @@ func Pow2Muls(n1, n2 int) (int, int) {
// 386:"SHLL\t[$]5",-"IMULL"
// arm:"SLL\t[$]5",-"MUL"
// arm64:"LSL\t[$]5",-"MUL"
+ // ppc64:"SLD\t[$]5",-"MUL"
+ // ppc64le:"SLD\t[$]5",-"MUL"
a := n1 * 32
// amd64:"SHLQ\t[$]6",-"IMULQ"
// 386:"SHLL\t[$]6",-"IMULL"
// arm:"SLL\t[$]6",-"MUL"
// arm64:`NEG\sR[0-9]+<<6,\sR[0-9]+`,-`LSL`,-`MUL`
+ // ppc64:"SLD\t[$]6","NEG\\sR[0-9]+,\\sR[0-9]+",-"MUL"
+ // ppc64le:"SLD\t[$]6","NEG\\sR[0-9]+,\\sR[0-9]+",-"MUL"
b := -64 * n2
return a, b
@@ -108,12 +121,16 @@ func Pow2Divs(n1 uint, n2 int) (uint, int) {
// amd64:"SHRQ\t[$]5",-"DIVQ"
// arm:"SRL\t[$]5",-".*udiv"
// arm64:"LSR\t[$]5",-"UDIV"
+ // ppc64:"SRD"
+ // ppc64le:"SRD"
a := n1 / 32 // unsigned
// amd64:"SARQ\t[$]6",-"IDIVQ"
// 386:"SARL\t[$]6",-"IDIVL"
// arm:"SRA\t[$]6",-".*udiv"
// arm64:"ASR\t[$]6",-"SDIV"
+ // ppc64:"SRAD"
+ // ppc64le:"SRAD"
b := n2 / 64 // signed
return a, b
@@ -140,6 +157,8 @@ func Pow2Mods(n1 uint, n2 int) (uint, int) {
// amd64:"ANDQ\t[$]31",-"DIVQ"
// arm:"AND\t[$]31",-".*udiv"
// arm64:"AND\t[$]31",-"UDIV"
+ // ppc64:"ANDCC\t[$]31"
+ // ppc64le:"ANDCC\t[$]31"
a := n1 % 32 // unsigned
// 386:-"IDIVL"
@@ -162,42 +181,135 @@ func ConstMods(n1 uint, n2 int) (uint, int) {
return a, b
}
+// Check that fix-up code is not generated for divisions where it has been
+// proven that the divisor is not -1 or that the dividend is > MinIntNN.
+func NoFix64A(divr int64) (int64, int64) {
+ var d int64 = 42
+ var e int64 = 84
+ if divr > 5 {
+ d /= divr // amd64:-"JMP"
+ e %= divr // amd64:-"JMP"
+ }
+ return d, e
+}
+
+func NoFix64B(divd int64) (int64, int64) {
+ var d int64
+ var e int64
+ var divr int64 = -1
+ if divd > -9223372036854775808 {
+ d = divd / divr // amd64:-"JMP"
+ e = divd % divr // amd64:-"JMP"
+ }
+ return d, e
+}
+
+func NoFix32A(divr int32) (int32, int32) {
+ var d int32 = 42
+ var e int32 = 84
+ if divr > 5 {
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ d /= divr
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ e %= divr
+ }
+ return d, e
+}
+
+func NoFix32B(divd int32) (int32, int32) {
+ var d int32
+ var e int32
+ var divr int32 = -1
+ if divd > -2147483648 {
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ d = divd / divr
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ e = divd % divr
+ }
+ return d, e
+}
+
+func NoFix16A(divr int16) (int16, int16) {
+ var d int16 = 42
+ var e int16 = 84
+ if divr > 5 {
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ d /= divr
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ e %= divr
+ }
+ return d, e
+}
+
+func NoFix16B(divd int16) (int16, int16) {
+ var d int16
+ var e int16
+ var divr int16 = -1
+ if divd > -32768 {
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ d = divd / divr
+ // amd64:-"JMP"
+ // 386:-"JMP"
+ e = divd % divr
+ }
+ return d, e
+}
+
// Check that len() and cap() calls divided by powers of two are
// optimized into shifts and ands
func LenDiv1(a []int) int {
// 386:"SHRL\t[$]10"
// amd64:"SHRQ\t[$]10"
+ // ppc64:"SRD"\t[$]10"
+ // ppc64le:"SRD"\t[$]10"
return len(a) / 1024
}
func LenDiv2(s string) int {
// 386:"SHRL\t[$]11"
// amd64:"SHRQ\t[$]11"
+ // ppc64:"SRD\t[$]11"
+ // ppc64le:"SRD\t[$]11"
return len(s) / (4097 >> 1)
}
func LenMod1(a []int) int {
// 386:"ANDL\t[$]1023"
// amd64:"ANDQ\t[$]1023"
+ // ppc64:"ANDCC\t[$]1023"
+ // ppc64le:"ANDCC\t[$]1023"
return len(a) % 1024
}
func LenMod2(s string) int {
// 386:"ANDL\t[$]2047"
// amd64:"ANDQ\t[$]2047"
+ // ppc64:"ANDCC\t[$]2047"
+ // ppc64le:"ANDCC\t[$]2047"
return len(s) % (4097 >> 1)
}
func CapDiv(a []int) int {
// 386:"SHRL\t[$]12"
// amd64:"SHRQ\t[$]12"
+ // ppc64:"SRD\t[$]12"
+ // ppc64le:"SRD\t[$]12"
return cap(a) / ((1 << 11) + 2048)
}
func CapMod(a []int) int {
// 386:"ANDL\t[$]4095"
// amd64:"ANDQ\t[$]4095"
+ // ppc64:"ANDCC\t[$]4095"
+ // ppc64le:"ANDCC\t[$]4095"
return cap(a) % ((1 << 11) + 2048)
}
@@ -218,3 +330,16 @@ func MULA(a, b, c uint32) (uint32, uint32, uint32) {
r2 := b*64 + c
return r0, r1, r2
}
+
+func MULS(a, b, c uint32) (uint32, uint32, uint32) {
+ // arm/7:`MULS`,-`MUL\s`
+ // arm64:`MSUBW`,-`MULW`
+ r0 := c - a*b
+ // arm/7:`MULS`,-`MUL\s`
+ // arm64:`MSUBW`,-`MULW`
+ r1 := a - c*79
+ // arm/7:`SUB`,-`MULS`,-`MUL\s`
+ // arm64:`SUB`,-`MSUBW`,-`MULW`
+ r2 := c - b*64
+ return r0, r1, r2
+}
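
The NoFix tests assert the absence of JMP in the compiled output. To reproduce the check by hand, compile a function with a proven divisor bound and read the assembly (a sketch; the file name is illustrative):

package main

// Because the compiler proves divr > 5 here (so divr is never -1), it can
// emit a bare hardware divide without the MinInt/-1 fix-up branch.
// Inspect with: go build -gcflags=-S div.go
func noFix(a, divr int64) int64 {
	if divr > 5 {
		return a / divr
	}
	return 0
}

func main() {
	_ = noFix(84, 7)
}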
diff --git a/test/codegen/comparisons.go b/test/codegen/comparisons.go
index 072393f3a6..fb17d3ca5d 100644
--- a/test/codegen/comparisons.go
+++ b/test/codegen/comparisons.go
@@ -36,6 +36,7 @@ func CompareString2(s string) bool {
func CompareString3(s string) bool {
// amd64:`CMPQ\t\(.*\), [A-Z]`
// arm64:-`CMPW\t`
+ // ppc64:-`CMPW\t`
// ppc64le:-`CMPW\t`
// s390x:-`CMPW\t`
return s == "xxxxxxxx"
diff --git a/test/codegen/copy.go b/test/codegen/copy.go
index dc8ee43f4c..46c2bde9ab 100644
--- a/test/codegen/copy.go
+++ b/test/codegen/copy.go
@@ -16,6 +16,8 @@ func movesmall4() {
// amd64:-".*memmove"
// arm:-".*memmove"
// arm64:-".*memmove"
+ // ppc64:-".*memmove"
+ // ppc64le:-".*memmove"
copy(x[1:], x[:])
}
@@ -24,6 +26,8 @@ func movesmall7() {
// 386:-".*memmove"
// amd64:-".*memmove"
// arm64:-".*memmove"
+ // ppc64:-".*memmove"
+ // ppc64le:-".*memmove"
copy(x[1:], x[:])
}
@@ -63,6 +67,7 @@ func moveDisjointNoOverlap(a *[256]byte) {
func ptrEqual() {
// amd64:-"JEQ",-"JNE"
+ // ppc64:-"BEQ",-"BNE"
// ppc64le:-"BEQ",-"BNE"
// s390x:-"BEQ",-"BNE"
copy(x[:], x[:])
@@ -70,6 +75,7 @@ func ptrEqual() {
func ptrOneOffset() {
// amd64:-"JEQ",-"JNE"
+ // ppc64:-"BEQ",-"BNE"
// ppc64le:-"BEQ",-"BNE"
// s390x:-"BEQ",-"BNE"
copy(x[1:], x[:])
@@ -77,6 +83,7 @@ func ptrOneOffset() {
func ptrBothOffset() {
// amd64:-"JEQ",-"JNE"
+ // ppc64:-"BEQ",-"BNE"
// ppc64le:-"BEQ",-"BNE"
// s390x:-"BEQ",-"BNE"
copy(x[1:], x[2:])
diff --git a/test/codegen/floats.go b/test/codegen/floats.go
index 4e4f87d574..5e1f60b08b 100644
--- a/test/codegen/floats.go
+++ b/test/codegen/floats.go
@@ -22,6 +22,8 @@ func Mul2(f float64) float64 {
// amd64:"ADDSD",-"MULSD"
// arm/7:"ADDD",-"MULD"
// arm64:"FADDD",-"FMULD"
+ // ppc64:"FADD",-"FMUL"
+ // ppc64le:"FADD",-"FMUL"
return f * 2.0
}
@@ -31,6 +33,8 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) {
// amd64:"MULSD",-"DIVSD"
// arm/7:"MULD",-"DIVD"
// arm64:"FMULD",-"FDIVD"
+ // ppc64:"FMUL",-"FDIV"
+ // ppc64le:"FMUL",-"FDIV"
x := f1 / 16.0
// 386/sse2:"MULSD",-"DIVSD"
@@ -38,6 +42,8 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) {
// amd64:"MULSD",-"DIVSD"
// arm/7:"MULD",-"DIVD"
// arm64:"FMULD",-"FDIVD"
+ // ppc64:"FMUL",-"FDIVD"
+ // ppc64le:"FMUL",-"FDIVD"
y := f2 / 0.125
// 386/sse2:"ADDSD",-"DIVSD",-"MULSD"
@@ -45,6 +51,8 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) {
// amd64:"ADDSD",-"DIVSD",-"MULSD"
// arm/7:"ADDD",-"MULD",-"DIVD"
// arm64:"FADDD",-"FMULD",-"FDIVD"
+ // ppc64:"FADD",-"FMUL",-"FDIV"
+ // ppc64le:"FADD",-"FMUL",-"FDIV"
z := f3 / 0.5
return x, y, z
@@ -71,28 +79,44 @@ func indexStore(b0 []float64, b1 float64, idx int) {
func FusedAdd32(x, y, z float32) float32 {
// s390x:"FMADDS\t"
+ // ppc64:"FMADDS\t"
// ppc64le:"FMADDS\t"
+ // arm64:"FMADDS"
return x*y + z
}
-func FusedSub32(x, y, z float32) float32 {
+func FusedSub32_a(x, y, z float32) float32 {
// s390x:"FMSUBS\t"
+ // ppc64:"FMSUBS\t"
// ppc64le:"FMSUBS\t"
return x*y - z
}
+func FusedSub32_b(x, y, z float32) float32 {
+ // arm64:"FMSUBS"
+ return z - x*y
+}
+
func FusedAdd64(x, y, z float64) float64 {
// s390x:"FMADD\t"
+ // ppc64:"FMADD\t"
// ppc64le:"FMADD\t"
+ // arm64:"FMADDD"
return x*y + z
}
-func FusedSub64(x, y, z float64) float64 {
+func FusedSub64_a(x, y, z float64) float64 {
// s390x:"FMSUB\t"
+ // ppc64:"FMSUB\t"
// ppc64le:"FMSUB\t"
return x*y - z
}
+func FusedSub64_b(x, y, z float64) float64 {
+ // arm64:"FMSUBD"
+ return z - x*y
+}
+
// ---------------- //
// Non-floats //
// ---------------- //
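
For orientation, the fused-multiply tests above pin this pattern: on arm64, s390x, and ppc64 the compiler may contract a multiply-add into one instruction, and the operand order of the subtraction selects different fused forms, which is why FusedSub was split into _a and _b variants. A sketch:

package fma

// May compile to a single FMADDD on arm64 (one rounding step, not two).
func MulAdd(x, y, z float64) float64 { return x*y + z }

// x*y - z and z - x*y map to different fused instructions, so the codegen
// assertions for them live in separate functions.
func MulSub(x, y, z float64) float64 { return x*y - z }
func SubMul(x, y, z float64) float64 { return z - x*y }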
diff --git a/test/codegen/maps.go b/test/codegen/maps.go
index d167715898..8dd22ed5ca 100644
--- a/test/codegen/maps.go
+++ b/test/codegen/maps.go
@@ -37,6 +37,35 @@ func AccessString2(m map[string]int) bool {
return ok
}
+// ------------------- //
+// String Conversion //
+// ------------------- //
+
+func LookupStringConversionSimple(m map[string]int, bytes []byte) int {
+ // amd64:-`.*runtime\.slicebytetostring\(`
+ return m[string(bytes)]
+}
+
+func LookupStringConversionStructLit(m map[struct{ string }]int, bytes []byte) int {
+ // amd64:-`.*runtime\.slicebytetostring\(`
+ return m[struct{ string }{string(bytes)}]
+}
+
+func LookupStringConversionArrayLit(m map[[2]string]int, bytes []byte) int {
+ // amd64:-`.*runtime\.slicebytetostring\(`
+ return m[[2]string{string(bytes), string(bytes)}]
+}
+
+func LookupStringConversionNestedLit(m map[[1]struct{ s [1]string }]int, bytes []byte) int {
+ // amd64:-`.*runtime\.slicebytetostring\(`
+ return m[[1]struct{ s [1]string }{struct{ s [1]string }{s: [1]string{string(bytes)}}}]
+}
+
+func LookupStringConversionKeyedArrayLit(m map[[2]string]int, bytes []byte) int {
+ // amd64:-`.*runtime\.slicebytetostring\(`
+ return m[[2]string{0: string(bytes)}]
+}
+
// ------------------- //
// Map Clear //
// ------------------- //
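
The new Lookup* tests assert that no runtime.slicebytetostring call is emitted: when a []byte-to-string conversion is used only as a map key, even nested inside a composite-literal key, the compiler performs the lookup without allocating the string. The basic pattern:

package demo

// The string(b) conversion exists only to index the map, so the compiler
// can look up the key directly from the byte slice without allocating.
func lookup(m map[string]int, b []byte) int {
	return m[string(b)]
}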
diff --git a/test/codegen/math.go b/test/codegen/math.go
index 78e7bfa110..aaf6b080ff 100644
--- a/test/codegen/math.go
+++ b/test/codegen/math.go
@@ -13,21 +13,25 @@ var sink64 [8]float64
func approx(x float64) {
// s390x:"FIDBR\t[$]6"
// arm64:"FRINTPD"
+ // ppc64:"FRIP"
// ppc64le:"FRIP"
sink64[0] = math.Ceil(x)
// s390x:"FIDBR\t[$]7"
// arm64:"FRINTMD"
+ // ppc64:"FRIM"
// ppc64le:"FRIM"
sink64[1] = math.Floor(x)
// s390x:"FIDBR\t[$]1"
// arm64:"FRINTAD"
+ // ppc64:"FRIN"
// ppc64le:"FRIN"
sink64[2] = math.Round(x)
// s390x:"FIDBR\t[$]5"
// arm64:"FRINTZD"
+ // ppc64:"FRIZ"
// ppc64le:"FRIZ"
sink64[3] = math.Trunc(x)
@@ -51,11 +55,13 @@ func abs(x, y float64) {
// amd64:"BTRQ\t[$]63"
// arm64:"FABSD\t"
// s390x:"LPDFR\t",-"MOVD\t" (no integer load/store)
+ // ppc64:"FABS\t"
// ppc64le:"FABS\t"
sink64[0] = math.Abs(x)
// amd64:"BTRQ\t[$]63","PXOR" (TODO: this should be BTSQ)
// s390x:"LNDFR\t",-"MOVD\t" (no integer load/store)
+ // ppc64:"FNABS\t"
// ppc64le:"FNABS\t"
sink64[1] = -math.Abs(y)
}
@@ -70,11 +76,13 @@ func abs32(x float32) float32 {
func copysign(a, b, c float64) {
// amd64:"BTRQ\t[$]63","SHRQ\t[$]63","SHLQ\t[$]63","ORQ"
// s390x:"CPSDR",-"MOVD" (no integer load/store)
+ // ppc64:"FCPSGN"
// ppc64le:"FCPSGN"
sink64[0] = math.Copysign(a, b)
// amd64:"BTSQ\t[$]63"
// s390x:"LNDFR\t",-"MOVD\t" (no integer load/store)
+ // ppc64:"FCPSGN"
// ppc64le:"FCPSGN"
// arm64:"ORR", -"AND"
sink64[1] = math.Copysign(c, -1)
@@ -86,6 +94,7 @@ func copysign(a, b, c float64) {
// amd64:-"SHLQ\t[$]1",-"SHRQ\t[$]1","SHRQ\t[$]63","SHLQ\t[$]63","ORQ"
// s390x:"CPSDR\t",-"MOVD\t" (no integer load/store)
+ // ppc64:"FCPSGN"
// ppc64le:"FCPSGN"
sink64[3] = math.Copysign(-1, c)
}
@@ -93,6 +102,8 @@ func copysign(a, b, c float64) {
func fromFloat64(f64 float64) uint64 {
// amd64:"MOVQ\tX.*, [^X].*"
// arm64:"FMOVD\tF.*, R.*"
+ // ppc64:"MFVSRD"
+ // ppc64le:"MFVSRD"
return math.Float64bits(f64+1) + 1
}
@@ -105,6 +116,8 @@ func fromFloat32(f32 float32) uint32 {
func toFloat64(u64 uint64) float64 {
// amd64:"MOVQ\t[^X].*, X.*"
// arm64:"FMOVD\tR.*, F.*"
+ // ppc64:"MTVSRD"
+ // ppc64le:"MTVSRD"
return math.Float64frombits(u64+1) + 1
}
@@ -135,6 +148,7 @@ func constantCheck32() bool {
func constantConvert32(x float32) float32 {
// amd64:"MOVSS\t[$]f32.3f800000\\(SB\\)"
// s390x:"FMOVS\t[$]f32.3f800000\\(SB\\)"
+ // ppc64:"FMOVS\t[$]f32.3f800000\\(SB\\)"
// ppc64le:"FMOVS\t[$]f32.3f800000\\(SB\\)"
// arm64:"FMOVS\t[$]\\(1.0\\)"
if x > math.Float32frombits(0x3f800000) {
@@ -146,6 +160,7 @@ func constantConvert32(x float32) float32 {
func constantConvertInt32(x uint32) uint32 {
// amd64:-"MOVSS"
// s390x:-"FMOVS"
+ // ppc64:-"FMOVS"
// ppc64le:-"FMOVS"
// arm64:-"FMOVS"
if x > math.Float32bits(1) {
diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go
index 9bb2254155..c21de19707 100644
--- a/test/codegen/mathbits.go
+++ b/test/codegen/mathbits.go
@@ -104,6 +104,8 @@ func OnesCount(n uint) int {
// amd64:"POPCNTQ",".*support_popcnt"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
+ // ppc64:"POPCNTD"
+ // ppc64le:"POPCNTD"
return bits.OnesCount(n)
}
@@ -111,6 +113,8 @@ func OnesCount64(n uint64) int {
// amd64:"POPCNTQ",".*support_popcnt"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
+ // ppc64:"POPCNTD"
+ // ppc64le:"POPCNTD"
return bits.OnesCount64(n)
}
@@ -118,6 +122,8 @@ func OnesCount32(n uint32) int {
// amd64:"POPCNTL",".*support_popcnt"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
+ // ppc64:"POPCNTW"
+ // ppc64le:"POPCNTW"
return bits.OnesCount32(n)
}
@@ -125,11 +131,15 @@ func OnesCount16(n uint16) int {
// amd64:"POPCNTL",".*support_popcnt"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
+ // ppc64:"POPCNTW"
+ // ppc64le:"POPCNTW"
return bits.OnesCount16(n)
}
func OnesCount8(n uint8) int {
// s390x:"POPCNT"
+ // ppc64:"POPCNTB"
+ // ppc64le:"POPCNTB"
return bits.OnesCount8(n)
}
@@ -171,6 +181,7 @@ func RotateLeft64(n uint64) uint64 {
// amd64:"ROLQ"
// arm64:"ROR"
// ppc64:"ROTL"
+ // ppc64le:"ROTL"
// s390x:"RLLG"
return bits.RotateLeft64(n, 37)
}
@@ -179,6 +190,7 @@ func RotateLeft32(n uint32) uint32 {
// amd64:"ROLL" 386:"ROLL"
// arm64:"RORW"
// ppc64:"ROTLW"
+ // ppc64le:"ROTLW"
// s390x:"RLL"
return bits.RotateLeft32(n, 9)
}
@@ -197,6 +209,7 @@ func RotateLeftVariable(n uint, m int) uint {
// amd64:"ROLQ"
// arm64:"ROR"
// ppc64:"ROTL"
+ // ppc64le:"ROTL"
// s390x:"RLLG"
return bits.RotateLeft(n, m)
}
@@ -205,6 +218,7 @@ func RotateLeftVariable64(n uint64, m int) uint64 {
// amd64:"ROLQ"
// arm64:"ROR"
// ppc64:"ROTL"
+ // ppc64le:"ROTL"
// s390x:"RLLG"
return bits.RotateLeft64(n, m)
}
@@ -213,6 +227,7 @@ func RotateLeftVariable32(n uint32, m int) uint32 {
// amd64:"ROLL"
// arm64:"RORW"
// ppc64:"ROTLW"
+ // ppc64le:"ROTLW"
// s390x:"RLL"
return bits.RotateLeft32(n, m)
}
@@ -224,24 +239,32 @@ func RotateLeftVariable32(n uint32, m int) uint32 {
func TrailingZeros(n uint) int {
// amd64:"BSFQ","MOVL\t\\$64","CMOVQEQ"
// s390x:"FLOGR"
+ // ppc64:"ANDN","POPCNTD"
+ // ppc64le:"ANDN","POPCNTD"
return bits.TrailingZeros(n)
}
func TrailingZeros64(n uint64) int {
// amd64:"BSFQ","MOVL\t\\$64","CMOVQEQ"
// s390x:"FLOGR"
+ // ppc64:"ANDN","POPCNTD"
+ // ppc64le:"ANDN","POPCNTD"
return bits.TrailingZeros64(n)
}
func TrailingZeros32(n uint32) int {
// amd64:"BTSQ\\t\\$32","BSFQ"
// s390x:"FLOGR","MOVWZ"
+ // ppc64:"ANDN","POPCNTW"
+ // ppc64le:"ANDN","POPCNTW"
return bits.TrailingZeros32(n)
}
func TrailingZeros16(n uint16) int {
// amd64:"BSFL","BTSL\\t\\$16"
// s390x:"FLOGR","OR\t\\$65536"
+ // ppc64:"POPCNTD","OR\\t\\$65536"
+ // ppc64le:"POPCNTD","OR\\t\\$65536"
return bits.TrailingZeros16(n)
}
@@ -310,13 +333,15 @@ func IterateBits8(n uint8) int {
func Mul(x, y uint) (hi, lo uint) {
// amd64:"MULQ"
// arm64:"UMULH","MUL"
- // ppc64: "MULHDU", "MULLD"
+ // ppc64:"MULHDU","MULLD"
+ // ppc64le:"MULHDU","MULLD"
return bits.Mul(x, y)
}
func Mul64(x, y uint64) (hi, lo uint64) {
// amd64:"MULQ"
// arm64:"UMULH","MUL"
- // ppc64: "MULHDU", "MULLD"
+ // ppc64:"MULHDU","MULLD"
+ // ppc64le:"MULHDU","MULLD"
return bits.Mul64(x, y)
}
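
As a usage note, the way to benefit from these intrinsics is to call math/bits directly instead of hand-writing shift/or or multiply-high sequences; a brief sketch:

package demo

import "math/bits"

// Lowered to a single rotate (ROLL/RORW/ROTLW/RLL) on the architectures
// asserted above, rather than two shifts and an OR.
func rotl9(x uint32) uint32 {
	return bits.RotateLeft32(x, 9)
}

// Lowered to a multiply-high/multiply-low pair (e.g. MULHDU/MULLD on
// ppc64) instead of a software helper.
func mulWide(x, y uint64) (hi, lo uint64) {
	return bits.Mul64(x, y)
}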
diff --git a/test/codegen/memcombine.go b/test/codegen/memcombine.go
index 9c4b36818e..b3d2cb2067 100644
--- a/test/codegen/memcombine.go
+++ b/test/codegen/memcombine.go
@@ -113,16 +113,22 @@ func load_be16_idx(b []byte, idx int) {
func load_le_byte2_uint16(s []byte) uint16 {
// arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR`,-`MOVB`
+ // 386:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL`
+ // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL`
return uint16(s[0]) | uint16(s[1])<<8
}
func load_le_byte2_uint16_inv(s []byte) uint16 {
// arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR`,-`MOVB`
+ // 386:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL`
+ // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL`
return uint16(s[1])<<8 | uint16(s[0])
}
func load_le_byte4_uint32(s []byte) uint32 {
// arm64:`MOVWU\t\(R[0-9]+\)`,-`ORR`,-`MOV[BH]`
+ // 386:`MOVL\s\([A-Z]+\)`,-`MOVB`,-`OR`,-`MOVW`
+ // amd64:`MOVL\s\([A-Z]+\)`,-`MOVB`,-`OR`,-`MOVW`
return uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24
}
@@ -133,21 +139,25 @@ func load_le_byte4_uint32_inv(s []byte) uint32 {
func load_le_byte8_uint64(s []byte) uint64 {
// arm64:`MOVD\t\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]`
+ // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+`
return uint64(s[0]) | uint64(s[1])<<8 | uint64(s[2])<<16 | uint64(s[3])<<24 | uint64(s[4])<<32 | uint64(s[5])<<40 | uint64(s[6])<<48 | uint64(s[7])<<56
}
func load_le_byte8_uint64_inv(s []byte) uint64 {
// arm64:`MOVD\t\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]`
+ // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+`
return uint64(s[7])<<56 | uint64(s[6])<<48 | uint64(s[5])<<40 | uint64(s[4])<<32 | uint64(s[3])<<24 | uint64(s[2])<<16 | uint64(s[1])<<8 | uint64(s[0])
}
func load_be_byte2_uint16(s []byte) uint16 {
// arm64:`MOVHU\t\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB`
+ // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL`
return uint16(s[0])<<8 | uint16(s[1])
}
func load_be_byte2_uint16_inv(s []byte) uint16 {
// arm64:`MOVHU\t\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB`
+ // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL`
return uint16(s[1]) | uint16(s[0])<<8
}
@@ -158,31 +168,39 @@ func load_be_byte4_uint32(s []byte) uint32 {
func load_be_byte4_uint32_inv(s []byte) uint32 {
// arm64:`MOVWU\t\(R[0-9]+\)`,`REVW`,-`ORR`,-`REV16W`,-`MOV[BH]`
+ // amd64:`MOVL\s\([A-Z]+\)`,-`MOVB`,-`OR`,-`MOVW`
return uint32(s[3]) | uint32(s[2])<<8 | uint32(s[1])<<16 | uint32(s[0])<<24
}
func load_be_byte8_uint64(s []byte) uint64 {
// arm64:`MOVD\t\(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]`
+ // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+`
return uint64(s[0])<<56 | uint64(s[1])<<48 | uint64(s[2])<<40 | uint64(s[3])<<32 | uint64(s[4])<<24 | uint64(s[5])<<16 | uint64(s[6])<<8 | uint64(s[7])
}
func load_be_byte8_uint64_inv(s []byte) uint64 {
// arm64:`MOVD\t\(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]`
+ // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+`
return uint64(s[7]) | uint64(s[6])<<8 | uint64(s[5])<<16 | uint64(s[4])<<24 | uint64(s[3])<<32 | uint64(s[2])<<40 | uint64(s[1])<<48 | uint64(s[0])<<56
}
func load_le_byte2_uint16_idx(s []byte, idx int) uint16 {
// arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOVB`
+ // 386:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`ORL`,-`MOVB`
+ // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB`
return uint16(s[idx]) | uint16(s[idx+1])<<8
}
func load_le_byte2_uint16_idx_inv(s []byte, idx int) uint16 {
// arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOVB`
+ // 386:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`ORL`,-`MOVB`
+ // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB`
return uint16(s[idx+1])<<8 | uint16(s[idx])
}
func load_le_byte4_uint32_idx(s []byte, idx int) uint32 {
// arm64:`MOVWU\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOV[BH]`
+ // amd64:`MOVL\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB`,-`MOVW`
return uint32(s[idx]) | uint32(s[idx+1])<<8 | uint32(s[idx+2])<<16 | uint32(s[idx+3])<<24
}
@@ -193,6 +211,7 @@ func load_le_byte4_uint32_idx_inv(s []byte, idx int) uint32 {
func load_le_byte8_uint64_idx(s []byte, idx int) uint64 {
// arm64:`MOVD\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]`
+ // amd64:`MOVQ\s\([A-Z]+\)\([A-Z]+`
return uint64(s[idx]) | uint64(s[idx+1])<<8 | uint64(s[idx+2])<<16 | uint64(s[idx+3])<<24 | uint64(s[idx+4])<<32 | uint64(s[idx+5])<<40 | uint64(s[idx+6])<<48 | uint64(s[idx+7])<<56
}
@@ -203,11 +222,13 @@ func load_le_byte8_uint64_idx_inv(s []byte, idx int) uint64 {
func load_be_byte2_uint16_idx(s []byte, idx int) uint16 {
// arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB`
+ // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB`
return uint16(s[idx])<<8 | uint16(s[idx+1])
}
func load_be_byte2_uint16_idx_inv(s []byte, idx int) uint16 {
// arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB`
+ // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB`
return uint16(s[idx+1]) | uint16(s[idx])<<8
}
@@ -405,45 +426,67 @@ func store_be16_idx(b []byte, idx int) {
func store_le_byte_2(b []byte, val uint16) {
_ = b[2]
// arm64:`MOVH\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`
+ // 386:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`
+ // amd64:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`
b[1], b[2] = byte(val), byte(val>>8)
}
+func store_le_byte_2_inv(b []byte, val uint16) {
+ _ = b[2]
+ // 386:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`
+ // amd64:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`
+ b[2], b[1] = byte(val>>8), byte(val)
+}
+
func store_le_byte_4(b []byte, val uint32) {
_ = b[4]
// arm64:`MOVW\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`
+ // 386:`MOVL\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`
+ // amd64:`MOVL\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`
b[1], b[2], b[3], b[4] = byte(val), byte(val>>8), byte(val>>16), byte(val>>24)
}
func store_le_byte_8(b []byte, val uint64) {
_ = b[8]
// arm64:`MOVD\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`,-`MOVW`
+ // amd64:`MOVQ\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`,-`MOVL`
b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8] = byte(val), byte(val>>8), byte(val>>16), byte(val>>24), byte(val>>32), byte(val>>40), byte(val>>48), byte(val>>56)
}
func store_be_byte_2(b []byte, val uint16) {
_ = b[2]
// arm64:`REV16W`,`MOVH\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`
+ // amd64:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`
b[1], b[2] = byte(val>>8), byte(val)
}
func store_be_byte_4(b []byte, val uint32) {
_ = b[4]
// arm64:`REVW`,`MOVW\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`,-`REV16W`
+ // amd64:`MOVL\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`
b[1], b[2], b[3], b[4] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val)
}
func store_be_byte_8(b []byte, val uint64) {
_ = b[8]
// arm64:`REV`,`MOVD\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`,-`MOVW`,-`REV16W`,-`REVW`
+ // amd64:`MOVQ\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`,-`MOVL`
b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8] = byte(val>>56), byte(val>>48), byte(val>>40), byte(val>>32), byte(val>>24), byte(val>>16), byte(val>>8), byte(val)
}
func store_le_byte_2_idx(b []byte, idx int, val uint16) {
_, _ = b[idx+0], b[idx+1]
// arm64:`MOVH\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,-`MOVB`
+ // 386:`MOVW\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+`,-`MOVB`
b[idx+1], b[idx+0] = byte(val>>8), byte(val)
}
+func store_le_byte_2_idx_inv(b []byte, idx int, val uint16) {
+ _, _ = b[idx+0], b[idx+1]
+ // 386:`MOVW\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+`,-`MOVB`
+ b[idx+0], b[idx+1] = byte(val), byte(val>>8)
+}
+
func store_le_byte_4_idx(b []byte, idx int, val uint32) {
_, _, _, _ = b[idx+0], b[idx+1], b[idx+2], b[idx+3]
// arm64:`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,-`MOVB`,-`MOVH`
@@ -468,6 +511,24 @@ func store_be_byte_2_idx2(b []byte, idx int, val uint16) {
b[(idx<<1)+0], b[(idx<<1)+1] = byte(val>>8), byte(val)
}
+func store_le_byte_2_idx2(b []byte, idx int, val uint16) {
+ _, _ = b[(idx<<1)+0], b[(idx<<1)+1]
+ // arm64:`MOVH\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<1\)`,-`MOVB`
+ b[(idx<<1)+1], b[(idx<<1)+0] = byte(val>>8), byte(val)
+}
+
+func store_be_byte_4_idx4(b []byte, idx int, val uint32) {
+ _, _, _, _ = b[(idx<<2)+0], b[(idx<<2)+1], b[(idx<<2)+2], b[(idx<<2)+3]
+ // arm64:`REVW`,`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<2\)`,-`MOVB`,-`MOVH`,-`REV16W`
+ b[(idx<<2)+0], b[(idx<<2)+1], b[(idx<<2)+2], b[(idx<<2)+3] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val)
+}
+
+func store_le_byte_4_idx4_inv(b []byte, idx int, val uint32) {
+ _, _, _, _ = b[(idx<<2)+0], b[(idx<<2)+1], b[(idx<<2)+2], b[(idx<<2)+3]
+ // arm64:`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<2\)`,-`MOVB`,-`MOVH`
+ b[(idx<<2)+3], b[(idx<<2)+2], b[(idx<<2)+1], b[(idx<<2)+0] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val)
+}
+
// ------------- //
// Zeroing //
// ------------- //
@@ -477,14 +538,24 @@ func store_be_byte_2_idx2(b []byte, idx int, val uint16) {
func zero_byte_2(b1, b2 []byte) {
// bounds checks to guarantee safety of writes below
_, _ = b1[1], b2[1]
- b1[0], b1[1] = 0, 0 // arm64:"MOVH\tZR",-"MOVB"
- b2[1], b2[0] = 0, 0 // arm64:"MOVH\tZR",-"MOVB"
+ // arm64:"MOVH\tZR",-"MOVB"
+ // amd64:`MOVW\s[$]0,\s\([A-Z]+\)`
+ // 386:`MOVW\s[$]0,\s\([A-Z]+\)`
+ b1[0], b1[1] = 0, 0
+ // arm64:"MOVH\tZR",-"MOVB"
+ // 386:`MOVW\s[$]0,\s\([A-Z]+\)`
+ // amd64:`MOVW\s[$]0,\s\([A-Z]+\)`
+ b2[1], b2[0] = 0, 0
}
func zero_byte_4(b1, b2 []byte) {
_, _ = b1[3], b2[3]
- b1[0], b1[1], b1[2], b1[3] = 0, 0, 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
- b2[2], b2[3], b2[1], b2[0] = 0, 0, 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
+ // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
+ // amd64:`MOVL\s[$]0,\s\([A-Z]+\)`
+ // 386:`MOVL\s[$]0,\s\([A-Z]+\)`
+ b1[0], b1[1], b1[2], b1[3] = 0, 0, 0, 0
+ // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
+ b2[2], b2[3], b2[1], b2[0] = 0, 0, 0, 0
}
func zero_byte_8(b []byte) {
@@ -501,28 +572,6 @@ func zero_byte_16(b []byte) {
b[12], b[13], b[14], b[15] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW"
}
-/* TODO: enable them when corresponding optimization are implemented
-func zero_byte_4_idx(b []byte, idx int) {
- // arm64(DISABLED): `MOVW\sZR,\s\(R[0-9]+\)\(R[0-9]+<<2\)`,-`MOV[BH]`
- b[(idx<<2)+0] = 0
- b[(idx<<2)+1] = 0
- b[(idx<<2)+2] = 0
- b[(idx<<2)+3] = 0
-}
-
-func zero_byte_8_idx(b []byte, idx int) {
- // arm64(DISABLED): `MOVD\sZR,\s\(R[0-9]+\)\(R[0-9]+<<3\)`,-`MOV[BHW]`
- b[(idx<<3)+0] = 0
- b[(idx<<3)+1] = 0
- b[(idx<<3)+2] = 0
- b[(idx<<3)+3] = 0
- b[(idx<<3)+4] = 0
- b[(idx<<3)+5] = 0
- b[(idx<<3)+6] = 0
- b[(idx<<3)+7] = 0
-}
-*/
-
func zero_byte_30(a *[30]byte) {
*a = [30]byte{} // arm64:"STP",-"MOVB",-"MOVH",-"MOVW"
}
@@ -545,14 +594,23 @@ func zero_byte_2_idx2(b []byte, idx int) {
func zero_uint16_2(h1, h2 []uint16) {
_, _ = h1[1], h2[1]
- h1[0], h1[1] = 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
- h2[1], h2[0] = 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
+ // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
+ // amd64:`MOVL\s[$]0,\s\([A-Z]+\)`
+ // 386:`MOVL\s[$]0,\s\([A-Z]+\)`
+ h1[0], h1[1] = 0, 0
+ // arm64:"MOVW\tZR",-"MOVB",-"MOVH"
+ // amd64:`MOVL\s[$]0,\s\([A-Z]+\)`
+ // 386:`MOVL\s[$]0,\s\([A-Z]+\)`
+ h2[1], h2[0] = 0, 0
}
func zero_uint16_4(h1, h2 []uint16) {
_, _ = h1[3], h2[3]
- h1[0], h1[1], h1[2], h1[3] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
- h2[2], h2[3], h2[1], h2[0] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)`
+ h1[0], h1[1], h1[2], h1[3] = 0, 0, 0, 0
+ // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ h2[2], h2[3], h2[1], h2[0] = 0, 0, 0, 0
}
func zero_uint16_8(h []uint16) {
@@ -563,8 +621,12 @@ func zero_uint16_8(h []uint16) {
func zero_uint32_2(w1, w2 []uint32) {
_, _ = w1[1], w2[1]
- w1[0], w1[1] = 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
- w2[1], w2[0] = 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)`
+ w1[0], w1[1] = 0, 0
+ // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)`
+ w2[1], w2[0] = 0, 0
}
func zero_uint32_4(w1, w2 []uint32) {
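
A hedged usage note: the byte-by-byte loads and stores these tests exercise are the same shape that encoding/binary's fixed-endian helpers expand to, so idiomatic code gets the same single-instruction lowering:

package demo

import "encoding/binary"

// Same shape as load_le_byte4_uint32 above; compiles to one 32-bit load
// (plus a bounds check) on amd64, arm64, and 386.
func readLE32(s []byte) uint32 {
	return binary.LittleEndian.Uint32(s)
}

// Same shape as store_le_byte_4: one 32-bit store rather than four byte
// stores.
func writeLE32(b []byte, v uint32) {
	binary.LittleEndian.PutUint32(b, v)
}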
diff --git a/test/codegen/noextend.go b/test/codegen/noextend.go
index ee4900226c..46bfe3f2f9 100644
--- a/test/codegen/noextend.go
+++ b/test/codegen/noextend.go
@@ -21,30 +21,38 @@ var val8 [8]uint8
func set16(x8 int8, u8 uint8, y8 int8, z8 uint8) {
// Truncate not needed, load does sign/zero extend
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
sval16[0] = int16(x8)
+ // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
val16[0] = uint16(u8)
// AND not needed due to size
+ // ppc64:-"ANDCC"
// ppc64le:-"ANDCC"
sval16[1] = 255 & int16(x8+y8)
+ // ppc64:-"ANDCC"
// ppc64le:-"ANDCC"
val16[1] = 255 & uint16(u8+z8)
}
func shiftidx(x8 int8, u8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) {
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
sval16[0] = int16(val16[x8>>1])
+ // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
val16[0] = uint16(sval16[u8>>2])
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
sval16[1] = int16(val16[x16>>1])
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
val16[1] = uint16(sval16[u16>>2])
@@ -53,87 +61,109 @@ func shiftidx(x8 int8, u8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) {
func setnox(x8 int8, u8 uint8, y8 int8, z8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) {
// Truncate not needed due to sign/zero extension on load
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
sval16[0] = int16(x8)
+ // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
val16[0] = uint16(u8)
// AND not needed due to size
+ // ppc64:-"ANDCC"
// ppc64le:-"ANDCC"
sval16[1] = 255 & int16(x8+y8)
+ // ppc64:-"ANDCC"
// ppc64le:-"ANDCC"
val16[1] = 255 & uint16(u8+z8)
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
sval32[0] = int32(x8)
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
sval32[1] = int32(x16)
+ //ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
//ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
val32[0] = uint32(u8)
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
val32[1] = uint32(u16)
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
sval64[0] = int64(x8)
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
sval64[1] = int64(x16)
+ // ppc64:-"MOVW\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVW\tR\\d+,\\sR\\d+"
sval64[2] = int64(x32)
+ //ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
//ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
val64[0] = uint64(u8)
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
val64[1] = uint64(u16)
+ // ppc64:-"MOVWZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVWZ\tR\\d+,\\sR\\d+"
val64[2] = uint64(u32)
}
func cmp16(x8 int8, u8 uint8, x32 int32, u32 uint32, x64 int64, u64 uint64) bool {
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
if int16(x8) == sval16[0] {
return true
}
+ // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
if uint16(u8) == val16[0] {
return true
}
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
if uint16(u32>>16) == val16[0] {
return true
}
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
if uint16(u64>>48) == val16[0] {
return true
}
// Verify the truncates are using the correct sign.
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
if int16(x32) == sval16[0] {
return true
}
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
if uint16(u32) == val16[0] {
return true
}
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
if int16(x64) == sval16[0] {
return true
}
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
if uint16(u64) == val16[0] {
return true
@@ -143,32 +173,38 @@ func cmp16(x8 int8, u8 uint8, x32 int32, u32 uint32, x64 int64, u64 uint64) bool
}
func cmp32(x8 int8, u8 uint8, x16 int16, u16 uint16, x64 int64, u64 uint64) bool {
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
if int32(x8) == sval32[0] {
return true
}
+ // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
if uint32(u8) == val32[0] {
return true
}
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
if int32(x16) == sval32[0] {
return true
}
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
if uint32(u16) == val32[0] {
return true
}
// Verify the truncates are using the correct sign.
+ // ppc64:-"MOVWZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVWZ\tR\\d+,\\sR\\d+"
if int32(x64) == sval32[0] {
return true
}
+ // ppc64:-"MOVW\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVW\tR\\d+,\\sR\\d+"
if uint32(u64) == val32[0] {
return true
@@ -179,31 +215,37 @@ func cmp32(x8 int8, u8 uint8, x16 int16, u16 uint16, x64 int64, u64 uint64) bool
func cmp64(x8 int8, u8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) bool {
+ // ppc64:-"MOVB\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVB\tR\\d+,\\sR\\d+"
if int64(x8) == sval64[0] {
return true
}
+ // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
if uint64(u8) == val64[0] {
return true
}
+ // ppc64:-"MOVH\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVH\tR\\d+,\\sR\\d+"
if int64(x16) == sval64[0] {
return true
}
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+"
if uint64(u16) == val64[0] {
return true
}
+ // ppc64:-"MOVW\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVW\tR\\d+,\\sR\\d+"
if int64(x32) == sval64[0] {
return true
}
+ // ppc64:-"MOVWZ\tR\\d+,\\sR\\d+"
// ppc64le:-"MOVWZ\tR\\d+,\\sR\\d+"
if uint64(u32) == val64[0] {
return true
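
The noextend.go additions all assert the same property on big-endian ppc64 that was already checked on ppc64le: a value materialized by a sign- or zero-extending load needs no second register-to-register extension. A minimal sketch of the pattern, following the file's conventions (val64 stands in for the file's package-level arrays):

package codegen

var val64 [8]uint64

func setByte(u []uint8) {
 // The byte load is already zero-extending (MOVBZ from memory),
 // so no MOVBZ register-to-register move should follow it.
 // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+"
 // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+"
 val64[0] = uint64(u[0])
}
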
diff --git a/test/codegen/rotate.go b/test/codegen/rotate.go
index 5812e1c0b1..ce24b57877 100644
--- a/test/codegen/rotate.go
+++ b/test/codegen/rotate.go
@@ -16,18 +16,21 @@ func rot64(x uint64) uint64 {
// amd64:"ROLQ\t[$]7"
// arm64:"ROR\t[$]57"
// s390x:"RLLG\t[$]7"
+ // ppc64:"ROTL\t[$]7"
// ppc64le:"ROTL\t[$]7"
a += x<<7 | x>>57
// amd64:"ROLQ\t[$]8"
// arm64:"ROR\t[$]56"
// s390x:"RLLG\t[$]8"
+ // ppc64:"ROTL\t[$]8"
// ppc64le:"ROTL\t[$]8"
a += x<<8 + x>>56
// amd64:"ROLQ\t[$]9"
// arm64:"ROR\t[$]55"
// s390x:"RLLG\t[$]9"
+ // ppc64:"ROTL\t[$]9"
// ppc64le:"ROTL\t[$]9"
a += x<<9 ^ x>>55
@@ -41,6 +44,7 @@ func rot32(x uint32) uint32 {
// arm:"MOVW\tR\\d+@>25"
// arm64:"RORW\t[$]25"
// s390x:"RLL\t[$]7"
+ // ppc64:"ROTLW\t[$]7"
// ppc64le:"ROTLW\t[$]7"
a += x<<7 | x>>25
@@ -48,6 +52,7 @@ func rot32(x uint32) uint32 {
// arm:"MOVW\tR\\d+@>24"
// arm64:"RORW\t[$]24"
// s390x:"RLL\t[$]8"
+ // ppc64:"ROTLW\t[$]8"
// ppc64le:"ROTLW\t[$]8"
a += x<<8 + x>>24
@@ -55,6 +60,7 @@ func rot32(x uint32) uint32 {
// arm:"MOVW\tR\\d+@>23"
// arm64:"RORW\t[$]23"
// s390x:"RLL\t[$]9"
+ // ppc64:"ROTLW\t[$]9"
// ppc64le:"ROTLW\t[$]9"
a += x<<9 ^ x>>23
@@ -101,6 +107,7 @@ func rot64nc(x uint64, z uint) uint64 {
z &= 63
// amd64:"ROLQ"
+ // ppc64:"ROTL"
// ppc64le:"ROTL"
a += x<<z | x>>(64-z)
@@ -116,6 +123,7 @@ func rot32nc(x uint32, z uint) uint32 {
z &= 31
// amd64:"ROLL"
+ // ppc64:"ROTLW"
// ppc64le:"ROTLW"
a += x<<z | x>>(32-z)
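
For context on the rotate checks: the compiler recognizes the shift-or idiom as a single rotate only when it can prove the variable shift amount is in range, which is what the z &= 63 (or z &= 31) masking provides. A sketch of two equivalent spellings, both of which should reduce to one rotate instruction (ROLQ on amd64, ROTL on ppc64/ppc64le):

package codegen

import "math/bits"

func rotManual(x uint64, z uint) uint64 {
 z &= 63 // lets the compiler match the pattern as a rotate
 return x<<z | x>>(64-z)
}

func rotBits(x uint64) uint64 {
 return bits.RotateLeft64(x, 7) // same instruction via math/bits
}
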
diff --git a/test/codegen/stack.go b/test/codegen/stack.go
index 0f2f6178c7..ed2c1ed959 100644
--- a/test/codegen/stack.go
+++ b/test/codegen/stack.go
@@ -18,6 +18,7 @@ import "runtime"
// arm:"TEXT\t.*, [$]-4-"
// arm64:"TEXT\t.*, [$]0-"
// mips:"TEXT\t.*, [$]-4-"
+// ppc64:"TEXT\t.*, [$]0-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func StackStore() int {
@@ -37,6 +38,7 @@ type T struct {
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]0-"
// mips:"TEXT\t.*, [$]-4-"
+// ppc64:"TEXT\t.*, [$]0-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ZeroLargeStruct(x *T) {
@@ -51,6 +53,7 @@ func ZeroLargeStruct(x *T) {
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]0-"
+// ppc64:"TEXT\t.*, [$]0-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
// Note that 386 currently has to spill a register.
@@ -65,6 +68,7 @@ func KeepWanted(t *T) {
// - arm & mips fail due to softfloat calls
// amd64:"TEXT\t.*, [$]0-"
// arm64:"TEXT\t.*, [$]0-"
+// ppc64:"TEXT\t.*, [$]0-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ArrayAdd64(a, b [4]float64) [4]float64 {
@@ -78,6 +82,7 @@ func ArrayAdd64(a, b [4]float64) [4]float64 {
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]0-"
// mips:"TEXT\t.*, [$]-4-"
+// ppc64:"TEXT\t.*, [$]0-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ArrayInit(i, j int) [4]int {
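
The stack.go checks read the frame-size annotation on the TEXT line of the generated assembly. As a sketch (the symbol name and flags are illustrative), a function compiled with no locals might show up as:

 TEXT "".ArrayInit(SB), NOSPLIT, $0-48

where "$0-48" encodes a 0-byte local frame and 48 bytes of arguments and results, so the regexp `TEXT\t.*, [$]0-` asserts the function needs no stack frame on that architecture.
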
diff --git a/test/codegen/strings.go b/test/codegen/strings.go
index ccb6bd4273..39ee2e8b9f 100644
--- a/test/codegen/strings.go
+++ b/test/codegen/strings.go
@@ -13,3 +13,37 @@ func CountRunes(s string) int { // Issue #24923
// amd64:`.*countrunes`
return len([]rune(s))
}
+
+func ToByteSlice() []byte { // Issue #24698
+ // amd64:`LEAQ\ttype\.\[3\]uint8`
+ // amd64:`CALL\truntime\.newobject`
+ // amd64:-`.*runtime.stringtoslicebyte`
+ return []byte("foo")
+}
+
+// Loading from read-only symbols should get transformed into constants.
+func ConstantLoad() {
+ // 12592 = 0x3130
+ // 50 = 0x32
+ // amd64:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(`
+ // 386:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(`
+ // arm:`MOVW\t\$48`,`MOVW\t\$49`,`MOVW\t\$50`
+ // arm64:`MOVD\t\$12592`,`MOVD\t\$50`
+ bsink = []byte("012")
+
+ // 858927408 = 0x33323130
+ // 13620 = 0x3534
+ // amd64:`MOVL\t\$858927408`,`MOVW\t\$13620, 4\(`
+ // 386:`MOVL\t\$858927408`,`MOVW\t\$13620, 4\(`
+ // arm64:`MOVD\t\$858927408`,`MOVD\t\$13620`
+ bsink = []byte("012345")
+
+ // 3978425819141910832 = 0x3736353433323130
+ // 7306073769690871863 = 0x6564636261393837
+ // amd64:`MOVQ\t\$3978425819141910832`,`MOVQ\t\$7306073769690871863`
+ // 386:`MOVL\t\$858927408, \(`,`DUFFCOPY`
+ // arm64:`MOVD\t\$3978425819141910832`,`MOVD\t\$1650538808`,`MOVD\t\$25699`,`MOVD\t\$101`
+ bsink = []byte("0123456789abcde")
+}
+
+var bsink []byte
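
To make the hex comments above concrete: "012" is the bytes 0x30 0x31 0x32, so on little-endian amd64 the first two bytes become one 16-bit immediate store of 0x3130 = 12592 and the third a byte store of 0x32 = 50, with no call into runtime.stringtoslicebyte. A minimal sketch of the pattern being checked:

package codegen

var bsink []byte

func constantBytes() {
 // amd64:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(`
 bsink = []byte("012")
}
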
diff --git a/test/fixedbugs/bug506.dir/a.go b/test/fixedbugs/bug506.dir/a.go
new file mode 100644
index 0000000000..8e8a200581
--- /dev/null
+++ b/test/fixedbugs/bug506.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type internal struct {
+ f1 string
+ f2 float64
+}
+
+type S struct {
+ F struct {
+ I internal
+ }
+}
diff --git a/test/unsafereject1.go b/test/fixedbugs/bug506.dir/main.go
similarity index 50%
rename from test/unsafereject1.go
rename to test/fixedbugs/bug506.dir/main.go
index 12f77f963f..1b60e40d8d 100644
--- a/test/unsafereject1.go
+++ b/test/fixedbugs/bug506.dir/main.go
@@ -1,16 +1,20 @@
-// errorcheck -u -+
-
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Check that we cannot import a package that uses "unsafe" internally
-// when -u is supplied.
-
package main
-import "syscall" // ERROR "import unsafe package"
+import (
+ "fmt"
+
+ "./a"
+)
+
+var v = a.S{}
func main() {
- print(syscall.Environ())
+ want := "{{ 0}}"
+ if got := fmt.Sprint(v.F); got != want {
+ panic(got)
+ }
}
diff --git a/test/fixedbugs/bug506.go b/test/fixedbugs/bug506.go
new file mode 100644
index 0000000000..7c8ccc6ec7
--- /dev/null
+++ b/test/fixedbugs/bug506.go
@@ -0,0 +1,10 @@
+// rundir
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Gccgo caused an undefined symbol reference building hash functions
+// for an imported struct with unexported fields.
+
+package ignored
diff --git a/test/fixedbugs/issue20250.go b/test/fixedbugs/issue20250.go
index 47879385d2..c190515274 100644
--- a/test/fixedbugs/issue20250.go
+++ b/test/fixedbugs/issue20250.go
@@ -11,13 +11,13 @@
package p
type T struct {
- s string
+ s [2]string
}
func f(a T) { // ERROR "live at entry to f: a"
var e interface{} // ERROR "stack object e interface \{\}$"
func() { // ERROR "live at entry to f.func1: a &e"
- e = a.s // ERROR "live at call to convT2Estring: &e" "stack object a T$"
+ e = a.s // ERROR "live at call to convT2E: &e" "stack object a T$"
}()
// Before the fix, both a and e were live at the previous line.
_ = e
diff --git a/test/fixedbugs/issue22327.go b/test/fixedbugs/issue22327.go
new file mode 100644
index 0000000000..7b21d83402
--- /dev/null
+++ b/test/fixedbugs/issue22327.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Using a multi-result function as an argument to
+// append should compile successfully. Previously there
+// was a missing *int -> interface{} conversion that caused
+// the compiler to ICE.
+
+package p
+
+func f() ([]interface{}, *int) {
+ return nil, nil
+}
+
+var _ = append(f())
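
The one-liner works because append(f()) spreads f's results: the first result becomes the slice operand and the second is implicitly converted from *int to the element type interface{} before being appended. A sketch of the equivalent expanded form:

package p

func f() ([]interface{}, *int) {
 return nil, nil
}

func g() []interface{} {
 s, p := f()
 return append(s, p) // p is converted *int -> interface{} here
}
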
diff --git a/test/fixedbugs/issue26411.go b/test/fixedbugs/issue26411.go
new file mode 100644
index 0000000000..5f40bf2522
--- /dev/null
+++ b/test/fixedbugs/issue26411.go
@@ -0,0 +1,92 @@
+// +build !nacl,!js
+// run
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Ensure that label redefinition errors print out
+// a column number that matches the start of the current label's
+// definition instead of the label delimiting token ":"
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+)
+
+func main() {
+ tmpdir, err := ioutil.TempDir("", "issue26411")
+ if err != nil {
+ log.Fatalf("Failed to create temporary directory: %v", err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ tests := []struct {
+ code string
+ errors []string
+ }{
+ {
+ code: `
+package main
+
+func main() {
+foo:
+foo:
+}
+`,
+ errors: []string{
+ "^.+:5:1: label foo defined and not used\n",
+ ".+:6:1: label foo already defined at .+:5:1\n$",
+ },
+ },
+ {
+ code: `
+package main
+
+func main() {
+
+ bar:
+ bar:
+bar:
+bar :
+}
+`,
+
+ errors: []string{
+ "^.+:6:13: label bar defined and not used\n",
+ ".+:7:4: label bar already defined at .+:6:13\n",
+ ".+:8:1: label bar already defined at .+:6:13\n",
+ ".+:9:1: label bar already defined at .+:6:13\n$",
+ },
+ },
+ }
+
+ for i, test := range tests {
+ filename := filepath.Join(tmpdir, fmt.Sprintf("%d.go", i))
+ if err := ioutil.WriteFile(filename, []byte(test.code), 0644); err != nil {
+ log.Printf("#%d: failed to create file %s", i, filename)
+ continue
+ }
+ output, _ := exec.Command("go", "tool", "compile", filename).CombinedOutput()
+
+ // remove each matching error from the output
+ for _, err := range test.errors {
+ rx := regexp.MustCompile(err)
+ match := rx.Find(output)
+ output = bytes.Replace(output, match, nil, 1) // remove match (which might be nil) from output
+ }
+
+ // at this point all output should have been consumed
+ if len(output) != 0 {
+ log.Printf("Test case %d has unmatched errors:\n%s", i, output)
+ }
+ }
+}
diff --git a/test/fixedbugs/issue28055.go b/test/fixedbugs/issue28055.go
new file mode 100644
index 0000000000..d4889d54d4
--- /dev/null
+++ b/test/fixedbugs/issue28055.go
@@ -0,0 +1,16 @@
+// compile
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure VARDEF can be a top-level statement.
+
+package p
+
+func f() {
+ var s string
+ var as []string
+ switch false && (s+"a"+as[0]+s+as[0]+s == "") {
+ }
+}
diff --git a/test/fixedbugs/issue28058.go b/test/fixedbugs/issue28058.go
new file mode 100644
index 0000000000..d8206e7357
--- /dev/null
+++ b/test/fixedbugs/issue28058.go
@@ -0,0 +1,13 @@
+// errorcheck
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 14988: declaring a map with an invalid key type should not cause a
+// fatal panic.
+
+package main
+
+var x map[func()]int // ERROR "invalid map key type"
+var X map[func()]int // ERROR "invalid map key type"
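
The property under test is that map key types must be comparable; functions have no == operator, so these declarations must be diagnosed rather than crashing the compiler. The same holds for maps and slices as keys, as in this sketch of deliberately invalid code (each line should be rejected with "invalid map key type"):

package p

var (
 _ map[func()]int      // invalid: func values are not comparable
 _ map[map[int]int]int // invalid: map values are not comparable
 _ map[[]int]int       // invalid: slice values are not comparable
)
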
diff --git a/test/fixedbugs/issue5089.go b/test/fixedbugs/issue5089.go
index 9f7fa5a4d4..dc393e9b06 100644
--- a/test/fixedbugs/issue5089.go
+++ b/test/fixedbugs/issue5089.go
@@ -8,7 +8,7 @@
package p
-import "bufio" // GCCGO_ERROR "previous"
+import "bufio"
func (b *bufio.Reader) Buffered() int { // ERROR "non-local|redefinition"
return -1
diff --git a/test/live.go b/test/live.go
index 679562d9bf..a508947afc 100644
--- a/test/live.go
+++ b/test/live.go
@@ -141,7 +141,7 @@ var i9 interface{}
func f9() bool {
g8()
x := i9
- y := interface{}(str()) // ERROR "live at call to convT2Estring: x.data$" "live at call to str: x.data$" "stack object .autotmp_[0-9]+ string$"
+ y := interface{}(g18()) // ERROR "live at call to convT2E: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$"
i9 = y // make y escape so the line above has to call convT2E
return x != y
}
@@ -256,8 +256,8 @@ func f16() {
if b {
delete(mi, iface()) // ERROR "stack object .autotmp_[0-9]+ interface \{\}$"
}
- delete(mi, iface()) // ERROR "stack object .autotmp_[0-9]+ interface \{\}$"
- delete(mi, iface()) // ERROR "stack object .autotmp_[0-9]+ interface \{\}$"
+ delete(mi, iface())
+ delete(mi, iface())
}
var m2s map[string]*byte
@@ -302,8 +302,8 @@ func f18() {
if b {
z = m2[g18()] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
- z = m2[g18()] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
- z = m2[g18()] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
+ z = m2[g18()]
+ z = m2[g18()]
printbytepointer(z)
}
@@ -319,8 +319,8 @@ func f19() {
if b {
z = <-ch // ERROR "stack object .autotmp_[0-9]+ \*byte$"
}
- z = <-ch // ERROR "stack object .autotmp_[0-9]+ \*byte$"
- z = <-ch // ERROR "stack object .autotmp_[0-9]+ \*byte$" "live at call to chanrecv1: .autotmp_[0-9]+$"
+ z = <-ch
+ z = <-ch // ERROR "live at call to chanrecv1: .autotmp_[0-9]+$"
printbytepointer(z)
}
@@ -329,8 +329,8 @@ func f20() {
if b {
ch <- byteptr() // ERROR "stack object .autotmp_[0-9]+ \*byte$"
}
- ch <- byteptr() // ERROR "stack object .autotmp_[0-9]+ \*byte$"
- ch <- byteptr() // ERROR "stack object .autotmp_[0-9]+ \*byte$"
+ ch <- byteptr()
+ ch <- byteptr()
}
func f21() {
@@ -339,8 +339,8 @@ func f21() {
if b {
z = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
- z = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
- z = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
+ z = m2[[2]string{"x", "y"}]
+ z = m2[[2]string{"x", "y"}]
printbytepointer(z)
}
@@ -351,8 +351,8 @@ func f23() {
if b {
z, ok = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
- z, ok = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
- z, ok = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
+ z, ok = m2[[2]string{"x", "y"}]
+ z, ok = m2[[2]string{"x", "y"}]
printbytepointer(z)
print(ok)
}
@@ -363,8 +363,8 @@ func f24() {
if b {
m2[[2]string{"x", "y"}] = nil // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
- m2[[2]string{"x", "y"}] = nil // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
- m2[[2]string{"x", "y"}] = nil // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
+ m2[[2]string{"x", "y"}] = nil
+ m2[[2]string{"x", "y"}] = nil
}
// defer should not cause spurious ambiguously live variables
@@ -389,8 +389,8 @@ func f26(b bool) {
if b {
print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "stack object .autotmp_[0-9]+ \[3\]interface \{\}$"
}
- print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "stack object .autotmp_[0-9]+ \[3\]interface \{\}$"
- print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "stack object .autotmp_[0-9]+ \[3\]interface \{\}$"
+ print26((*int)(nil), (*int)(nil), (*int)(nil))
+ print26((*int)(nil), (*int)(nil), (*int)(nil))
printnl()
}
@@ -404,8 +404,8 @@ func f27(b bool) {
if b {
call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
}
- call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
- call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
+ call27(func() { x++ })
+ call27(func() { x++ })
printnl()
}
@@ -442,8 +442,8 @@ func f28(b bool) {
if b {
printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "stack object .autotmp_[0-9]+ \[10\]string$"
}
- printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "stack object .autotmp_[0-9]+ \[10\]string$"
- printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "stack object .autotmp_[0-9]+ \[10\]string$"
+ printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10)
+ printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10)
}
// map iterator should die on end of range loop
@@ -454,10 +454,10 @@ func f29(b bool) {
printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
}
}
- for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.iter\[string\]int$"
+ for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$"
printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
}
- for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.iter\[string\]int$"
+ for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$"
printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
}
}
@@ -481,10 +481,10 @@ func f30(b bool) {
printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$"
}
}
- for _, p := range pstructarr { // ERROR "stack object .autotmp_[0-9]+ \[10\]pstruct$"
+ for _, p := range pstructarr {
printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$"
}
- for _, p := range pstructarr { // ERROR "stack object .autotmp_[0-9]+ \[10\]pstruct$"
+ for _, p := range pstructarr {
printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$"
}
}
@@ -493,13 +493,13 @@ func f30(b bool) {
func f31(b1, b2, b3 bool) {
if b1 {
- g31(str()) // ERROR "stack object .autotmp_[0-9]+ string$"
+ g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
if b2 {
- h31(str()) // ERROR "live at call to convT2Estring: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ string$"
+ h31(g18()) // ERROR "live at call to convT2E: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
}
if b3 {
- panic(str()) // ERROR "stack object .autotmp_[0-9]+ string$"
+ panic(g18())
}
print(b3)
}
@@ -521,8 +521,8 @@ func f32(b bool) {
if b {
call32(t32.Inc) // ERROR "stack object .autotmp_[0-9]+ struct \{"
}
- call32(t32.Inc) // ERROR "stack object .autotmp_[0-9]+ struct \{"
- call32(t32.Inc) // ERROR "stack object .autotmp_[0-9]+ struct \{"
+ call32(t32.Inc)
+ call32(t32.Inc)
}
//go:noescape
@@ -694,3 +694,12 @@ func f41(p, q *int) (r *int) { // ERROR "live at entry to f41: p q$"
r = q
return // ERROR "live at call to deferreturn: r$"
}
+
+func f42() {
+ var p, q, r int
+ f43([]*int{&p, &q, &r}) // ERROR "stack object .autotmp_[0-9]+ \[3\]\*int$"
+ f43([]*int{&p, &r, &q})
+ f43([]*int{&q, &p, &r})
+}
+
+//go:noescape
+func f43(a []*int)
diff --git a/test/run.go b/test/run.go
index 3af6d1466b..b6421d5e41 100644
--- a/test/run.go
+++ b/test/run.go
@@ -78,6 +78,7 @@ func main() {
// Disable parallelism if printing or if using a simulator.
if *verbose || len(findExecCmd()) > 0 {
*numParallel = 1
+ *runoutputLimit = 1
}
ratec = make(chan bool, *numParallel)
@@ -1071,10 +1072,10 @@ func splitOutput(out string, wantAuto bool) []string {
// this function will report an error.
// Likewise if outStr does not have an error for a line which has a comment,
// or if the error message does not match the .
-// The syntax is Perl but its best to stick to egrep.
+// The syntax is Perl but it's best to stick to egrep.
//
// Source files are supplied as the fullshort slice.
-// It consists of pairs: full path to source file and it's base name.
+// It consists of pairs: full path to source file and its base name.
func (t *test) errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) {
defer func() {
if *verbose && err != nil {
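
For reference, the comment form errorCheck consumes looks like this (a sketch; the patterns are egrep-style and each quoted pattern must match the compiler diagnostic produced for its line):

 var x map[func()]int // ERROR "invalid map key type"
 z = m2[g18()]        // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
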
diff --git a/test/switch5.go b/test/switch5.go
index 6641d582bc..ce95bf8d7b 100644
--- a/test/switch5.go
+++ b/test/switch5.go
@@ -24,8 +24,8 @@ func f0(x int) {
func f1(x float32) {
switch x {
case 5:
- case 5: // ERROR "duplicate case 5 .value 5\.0. in switch"
- case 5.0: // ERROR "duplicate case 5 .value 5\.0. in switch"
+ case 5: // ERROR "duplicate case 5 in switch"
+ case 5.0: // ERROR "duplicate case 5 in switch"
}
}
@@ -44,9 +44,9 @@ func f3(e interface{}) {
case 0: // ERROR "duplicate case 0 in switch"
case int64(0):
case float32(10):
- case float32(10): // ERROR "duplicate case float32\(10\) .value 10\.0. in switch"
+ case float32(10): // ERROR "duplicate case float32\(10\) .value 10. in switch"
case float64(10):
- case float64(10): // ERROR "duplicate case float64\(10\) .value 10\.0. in switch"
+ case float64(10): // ERROR "duplicate case float64\(10\) .value 10. in switch"
}
}
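
The message change reflects how the constants compare: in a float32 switch the untyped constants 5 and 5.0 denote the same value, so the cases collide, and the error now prints the value with plain formatting ("10" rather than "10.0"). A sketch of deliberately erroneous code exercising this:

package p

func dup(x float32) {
 switch x {
 case 5:
 case 5.0: // duplicate of case 5: same float32 value
 }
}
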
diff --git a/test/typeswitch2.go b/test/typeswitch2.go
index 1160b62e14..5958b7db8e 100644
--- a/test/typeswitch2.go
+++ b/test/typeswitch2.go
@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Verify that various erroneous type switches are caught be the compiler.
+// Verify that various erroneous type switches are caught by the compiler.
// Does not compile.
package main
diff --git a/test/typeswitch3.go b/test/typeswitch3.go
index 58d4cba2d0..1388187566 100644
--- a/test/typeswitch3.go
+++ b/test/typeswitch3.go
@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Verify that erroneous type switches are caught be the compiler.
+// Verify that erroneous type switches are caught by the compiler.
// Issue 2700, among other things.
// Does not compile.
diff --git a/test/uintptrescapes2.go b/test/uintptrescapes2.go
index e7b5d721f5..2c8dfd7102 100644
--- a/test/uintptrescapes2.go
+++ b/test/uintptrescapes2.go
@@ -32,12 +32,12 @@ func G() {
var t int // ERROR "moved to heap"
F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "&t escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
var t2 int // ERROR "moved to heap"
- F3(uintptr(unsafe.Pointer(&t2))) // ERROR "live at call to F3: .?autotmp" "&t2 escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+ F3(uintptr(unsafe.Pointer(&t2))) // ERROR "live at call to F3: .?autotmp" "&t2 escapes to heap"
}
func H() {
var v int // ERROR "moved to heap"
F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
var v2 int // ERROR "moved to heap"
- F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+ F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap"
}
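
For background on these liveness checks: //go:uintptrescapes tells the compiler that a uintptr parameter may actually carry a converted pointer, so the pointed-to variable, and the autotmp holding the unsafe.Pointer conversion, must stay live across the call. A minimal sketch of the shape under test (names illustrative):

package p

import "unsafe"

//go:uintptrescapes
//go:noinline
func F(a uintptr) {}

func caller() {
 var v int // reported as "moved to heap"
 F(uintptr(unsafe.Pointer(&v)))
}
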
diff --git a/test/unsafereject2.go b/test/unsafereject2.go
deleted file mode 100644
index 04ad0578c9..0000000000
--- a/test/unsafereject2.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// errorcheck -u -+
-
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Check that we cannot import the "unsafe" package when -u is supplied.
-
-package a
-
-import "unsafe" // ERROR "import package unsafe"
-
-func Float32bits(f float32) uint32 {
- return *(*uint32)(unsafe.Pointer(&f))
-}